Nov 26 14:15:32 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 26 14:15:32 crc restorecon[4774]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 14:15:32 crc restorecon[4774]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 14:15:32 crc 
restorecon[4774]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 14:15:32 crc 
restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc 
restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc 
restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 14:15:32 
crc restorecon[4774]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 
14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 
14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc 
restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:32 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 14:15:33 crc restorecon[4774]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 14:15:33 crc restorecon[4774]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 26 14:15:33 crc kubenswrapper[5037]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 26 14:15:33 crc kubenswrapper[5037]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 26 14:15:33 crc kubenswrapper[5037]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 26 14:15:33 crc kubenswrapper[5037]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 26 14:15:33 crc kubenswrapper[5037]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 26 14:15:33 crc kubenswrapper[5037]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.637928 5037 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644175 5037 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644210 5037 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644220 5037 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644233 5037 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644244 5037 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644254 5037 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644264 5037 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644273 5037 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644308 5037 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644318 5037 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644326 5037 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644334 5037 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644342 5037 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644349 5037 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644359 5037 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644367 5037 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644376 5037 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644385 5037 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644394 5037 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644403 5037 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644412 5037 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644425 5037 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644436 5037 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644446 5037 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644455 5037 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644463 5037 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644471 5037 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644480 5037 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644488 5037 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644495 5037 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644503 5037 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644511 5037 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644519 5037 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644527 5037 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644535 5037 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644543 5037 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644551 5037 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644578 5037 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644588 5037 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644599 5037 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644609 5037 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644619 5037 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644628 5037 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644637 5037 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644652 5037 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644660 5037 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644668 5037 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644676 5037 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644684 5037 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644693 5037 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644702 5037 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644709 5037 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644717 5037 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644725 5037 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644734 5037 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644742 5037 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644749 5037 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644757 5037 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644765 5037 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644773 5037 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644780 5037 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644788 5037 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644795 5037 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644803 5037 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644811 5037 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644818 5037 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644825 5037 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644833 5037 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644840 5037 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644849 5037 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.644856 5037 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.645904 5037 flags.go:64] FLAG: --address="0.0.0.0"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.645926 5037 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.645941 5037 flags.go:64] FLAG: --anonymous-auth="true"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.645954 5037 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.645965 5037 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.645974 5037 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.645986 5037 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.645997 5037 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646007 5037 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646016 5037 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646026 5037 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646036 5037 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646045 5037 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646054 5037 flags.go:64] FLAG: --cgroup-root=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646063 5037 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646078 5037 flags.go:64] FLAG: --client-ca-file=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646088 5037 flags.go:64] FLAG: --cloud-config=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646097 5037 flags.go:64] FLAG: --cloud-provider=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646107 5037 flags.go:64] FLAG: --cluster-dns="[]"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646118 5037 flags.go:64] FLAG: --cluster-domain=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646127 5037 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646136 5037 flags.go:64] FLAG: --config-dir=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646145 5037 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646155 5037 flags.go:64] FLAG: --container-log-max-files="5"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646167 5037 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646175 5037 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646185 5037 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646194 5037 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646203 5037 flags.go:64] FLAG: --contention-profiling="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646213 5037 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646222 5037 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646231 5037 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646240 5037 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646251 5037 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646260 5037 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646270 5037 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646279 5037 flags.go:64] FLAG: --enable-load-reader="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646312 5037 flags.go:64] FLAG: --enable-server="true"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646322 5037 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646333 5037 flags.go:64] FLAG: --event-burst="100"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646342 5037 flags.go:64] FLAG: --event-qps="50"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646351 5037 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646360 5037 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646369 5037 flags.go:64] FLAG: --eviction-hard=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646404 5037 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646414 5037 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646423 5037 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646433 5037 flags.go:64] FLAG: --eviction-soft=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646442 5037 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646451 5037 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646466 5037 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646476 5037 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646485 5037 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646494 5037 flags.go:64] FLAG: --fail-swap-on="true"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646503 5037 flags.go:64] FLAG: --feature-gates=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646515 5037 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646524 5037 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646533 5037 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646542 5037 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646551 5037 flags.go:64] FLAG: --healthz-port="10248"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646560 5037 flags.go:64] FLAG: --help="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646569 5037 flags.go:64] FLAG: --hostname-override=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646578 5037 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646587 5037 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646595 5037 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646604 5037 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646613 5037 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646622 5037 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646630 5037 flags.go:64] FLAG: --image-service-endpoint=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646639 5037 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646648 5037 flags.go:64] FLAG: --kube-api-burst="100"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646657 5037 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646667 5037 flags.go:64] FLAG: --kube-api-qps="50"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646675 5037 flags.go:64] FLAG: --kube-reserved=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646684 5037 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646692 5037 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646702 5037 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646711 5037 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646720 5037 flags.go:64] FLAG: --lock-file=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646729 5037 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646738 5037 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646747 5037 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646761 5037 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646769 5037 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646778 5037 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646787 5037 flags.go:64] FLAG: --logging-format="text"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646799 5037 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646811 5037 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646821 5037 flags.go:64] FLAG: --manifest-url=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646830 5037 flags.go:64] FLAG: --manifest-url-header=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646848 5037 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646857 5037 flags.go:64] FLAG: --max-open-files="1000000"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646868 5037 flags.go:64] FLAG: --max-pods="110"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646877 5037 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646887 5037 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646895 5037 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646904 5037 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646914 5037 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646924 5037 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646933 5037 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646953 5037 flags.go:64] FLAG: --node-status-max-images="50"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646962 5037 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646972 5037 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646981 5037 flags.go:64] FLAG: --pod-cidr=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.646990 5037 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647005 5037 flags.go:64] FLAG: --pod-manifest-path=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647014 5037 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647023 5037 flags.go:64] FLAG: --pods-per-core="0"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647032 5037 flags.go:64] FLAG: --port="10250"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647041 5037 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647049 5037 flags.go:64] FLAG: --provider-id=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647058 5037 flags.go:64] FLAG: --qos-reserved=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647067 5037 flags.go:64] FLAG: --read-only-port="10255"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647076 5037 flags.go:64] FLAG: --register-node="true"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647084 5037 flags.go:64] FLAG: --register-schedulable="true"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647093 5037 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647108 5037 flags.go:64] FLAG: --registry-burst="10"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647117 5037 flags.go:64] FLAG: --registry-qps="5"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647126 5037 flags.go:64] FLAG: --reserved-cpus=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647136 5037 flags.go:64] FLAG: --reserved-memory=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647147 5037 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647156 5037 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647165 5037 flags.go:64] FLAG: --rotate-certificates="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647182 5037 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647191 5037 flags.go:64] FLAG: --runonce="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647200 5037 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647209 5037 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647219 5037 flags.go:64] FLAG: --seccomp-default="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647228 5037 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647236 5037 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647248 5037 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647257 5037 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647266 5037 flags.go:64] FLAG: --storage-driver-password="root"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647275 5037 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647316 5037 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647327 5037 flags.go:64] FLAG: --storage-driver-user="root"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647336 5037 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647346 5037 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647356 5037 flags.go:64] FLAG: --system-cgroups=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647364 5037 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647378 5037 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647387 5037 flags.go:64] FLAG: --tls-cert-file=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647396 5037 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647408 5037 flags.go:64] FLAG: --tls-min-version=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647417 5037 flags.go:64] FLAG: --tls-private-key-file=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647425 5037 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647434 5037 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647443 5037 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647452 5037 flags.go:64] FLAG: --v="2"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647463 5037 flags.go:64] FLAG: --version="false"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647474 5037 flags.go:64] FLAG: --vmodule=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647485 5037 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.647495 5037 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647716 5037 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647727 5037 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647736 5037 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647744 5037 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647752 5037 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647760 5037 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647770 5037 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647777 5037 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647785 5037 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647796 5037 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647806 5037 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647815 5037 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647824 5037 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647832 5037 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647840 5037 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647848 5037 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647855 5037 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647863 5037 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647872 5037 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647880 5037 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647888 5037 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647897 5037 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647905 5037 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647914 5037 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647924 5037 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647933 5037 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647941 5037 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647950 5037 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647958 5037 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647966 5037 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647974 5037 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647982 5037 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647990 5037 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.647997 5037 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648005 5037 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648013 5037 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648020 5037 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648028 5037 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648038 5037 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648048 5037 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648058 5037 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648067 5037 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648078 5037 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648087 5037 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648096 5037 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648104 5037 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648112 5037 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648120 5037 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648129 5037 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648138 5037 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648146 5037 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648155 5037 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648163 5037 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648171 5037 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648178 5037 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648186 5037 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648194 5037 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648201 5037 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648209 5037 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648217 5037 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648225 5037 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648232 5037 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648242 5037 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648250 5037 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648258 5037 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648266 5037 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648273 5037 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648281 5037 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648315 5037 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648324 5037 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.648332 5037 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.648355 5037 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.660925 5037 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.661001 5037 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661168 5037 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661196 5037 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661210 5037 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661227 5037 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661238 5037 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661250 5037 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661262 5037 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661273 5037 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661317 5037 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661331 5037 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661343 5037 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661354 5037 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661366 5037 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661376 5037 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661385 5037 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661394 5037 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661403 5037 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661412 5037 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661422 5037 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661432 5037 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661441 5037 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661450 5037 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661458 5037 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661466 5037 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661475 5037 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661486 5037 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661495 5037 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661503 5037 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661511 5037 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661520 5037 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661528 5037 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661540 5037 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661551 5037 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661562 5037 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661572 5037 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661582 5037 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661592 5037 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661602 5037 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661611 5037 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661620 5037 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661629 5037 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661637 5037 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661646 5037 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661654 5037 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661663 5037 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661673 5037 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661683 5037 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661693 5037 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661706 5037 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661717 5037 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661728 5037 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661736 5037 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661745 5037 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661753 5037 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661762 5037 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661770 5037 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661779 5037 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661787 5037 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661796 5037 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661805 5037 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661814 5037 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661822 5037 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661831 5037 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661839 5037 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661847 5037 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661857 5037 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661869 5037 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661880 5037 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661891 5037 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661908 5037 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.661917 5037 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.661933 5037 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662179 5037 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662194 5037 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662204 5037 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662214 5037 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662224 5037 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662233 5037 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662241 5037 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662250 5037 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662258 5037 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662268 5037 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662276 5037 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662312 5037 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662321 5037 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662329 5037 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662338 5037 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662347 5037 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662355 5037 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662364 5037 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662372 5037 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662381 5037 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662389 5037 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662397 5037 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662406 5037 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662435 5037 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662444 5037 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662453 5037 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662461 5037 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662470 5037 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662478 5037 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662489 5037 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662501 5037 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662513 5037 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662522 5037 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662533 5037 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662542 5037 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662552 5037 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662560 5037 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662569 5037 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662578 5037 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662587 5037 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662596 5037 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662606 5037 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662615 5037 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662624 5037 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662633 5037 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662641 5037 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662651 5037 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662664 5037 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662675 5037 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662684 5037 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662692 5037 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662703 5037 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662714 5037 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662724 5037 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662733 5037 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662742 5037 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662754 5037 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662765 5037 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662776 5037 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662785 5037 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662794 5037 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662804 5037 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662813 5037 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662823 5037 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662832 5037 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662841 5037 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662849 5037 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662863 5037 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662876 5037 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662889 5037 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.662902 5037 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.662920 5037 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.663221 5037 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.673955 5037 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.674067 5037 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.675634 5037 server.go:997] "Starting client certificate rotation"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.675666 5037 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.676381 5037 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-22 21:35:37.501877412 +0000 UTC
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.676509 5037 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.702059 5037 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.705125 5037 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 26 14:15:33 crc kubenswrapper[5037]: E1126 14:15:33.705844 5037 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.69:6443: connect: connection refused" logger="UnhandledError"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.725030 5037 log.go:25] "Validated CRI v1 runtime API"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.766380 5037 log.go:25] "Validated CRI v1 image API"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.769056 5037 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.774425 5037 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-26-14-10-16-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.774476 5037 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.809468 5037 manager.go:217] Machine: {Timestamp:2025-11-26 14:15:33.805955188 +0000 UTC m=+0.602725452 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06 BootID:b247aecb-f60a-4360-9d1b-a1f9057dc4ca Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:20:3f:3f Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:20:3f:3f Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:d1:7c:2c Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:8f:7c:6c Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:ab:d9:e1 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:f3:ae:40 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:bd:1e:42 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:8e:7b:1a:d7:b4:22 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:1a:7d:8d:25:ab:6b Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.809869 5037 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.810182 5037 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.812240 5037 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.812694 5037 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.812767 5037 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.813361 5037 topology_manager.go:138] "Creating topology manager with none policy"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.813382 5037 container_manager_linux.go:303] "Creating device plugin manager"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.814039 5037 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.814094 5037 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.814515 5037 state_mem.go:36] "Initialized new in-memory state store"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.814660 5037 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.819438 5037 kubelet.go:418] "Attempting to sync node with API server"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.819483 5037 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.819539 5037 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.819564 5037 kubelet.go:324] "Adding apiserver pod source"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.819586 5037 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.824438 5037 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.825503 5037 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.69:6443: connect: connection refused
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.825541 5037 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.69:6443: connect: connection refused
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.825591 5037 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 26 14:15:33 crc kubenswrapper[5037]: E1126 14:15:33.825633 5037 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.69:6443: connect: connection refused" logger="UnhandledError"
Nov 26 14:15:33 crc kubenswrapper[5037]: E1126 14:15:33.825666 5037 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.69:6443: connect: connection refused" logger="UnhandledError"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.827396 5037 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.829409 5037 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.829452 5037 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.829469 5037 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.829487 5037 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.829516 5037 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.829534 5037 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.829549 5037 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.829570 5037 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.829585 5037 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.829599 5037 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.829647 5037 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.829665 5037 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.830847 5037 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.831615 5037 server.go:1280] "Started kubelet"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.833184 5037 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.833206 5037 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.834102 5037 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.69:6443: connect: connection refused
Nov 26 14:15:33 crc systemd[1]: Started Kubernetes Kubelet.
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.835827 5037 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.836371 5037 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.836442 5037 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.836490 5037 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 11:35:20.197537234 +0000 UTC
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.836558 5037 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 165h19m46.360987989s for next certificate rotation
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.836594 5037 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.836614 5037 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.836907 5037 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 26 14:15:33 crc kubenswrapper[5037]: E1126 14:15:33.837544 5037 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.838032 5037 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.69:6443: connect: connection refused
Nov 26 14:15:33 crc kubenswrapper[5037]: E1126 14:15:33.838136 5037 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.69:6443: connect: connection refused" logger="UnhandledError"
Nov 26 14:15:33 crc kubenswrapper[5037]: E1126 14:15:33.838225 5037 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused" interval="200ms"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.838404 5037 server.go:460] "Adding debug handlers to kubelet server"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.841213 5037 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.842795 5037 factory.go:55] Registering systemd factory
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.842868 5037 factory.go:221] Registration of the systemd container factory successfully
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.847630 5037 factory.go:153] Registering CRI-O factory
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.847684 5037 factory.go:221] Registration of the crio container factory successfully
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.847735 5037 factory.go:103] Registering Raw factory
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.847769 5037 manager.go:1196] Started watching for new ooms in manager
Nov 26 14:15:33 crc kubenswrapper[5037]: E1126 14:15:33.848715 5037 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.69:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b941d95a98343 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 14:15:33.831557955 +0000 UTC m=+0.628328199,LastTimestamp:2025-11-26 14:15:33.831557955 +0000 UTC m=+0.628328199,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.852469 5037 manager.go:319] Starting recovery of all containers
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.862254 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.862394 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.862418 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.862438 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.862457 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864600 5037 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864657 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864682 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864702 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864727 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864746 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864796 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864815 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864832 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864858 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864879 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864898 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864919 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864938 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864957 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864976 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.864993 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865012 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865034 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865073 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865094 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865115 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865169 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865200 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865228 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865255 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865316 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865350 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865379 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865406 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865434 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865460 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865488 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865513 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865579 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865606 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865632 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865701 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865727 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865752 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865778 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865804 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865834 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865861 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865890 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.865915 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866024 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866051 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866090 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866119 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866150 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866177 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866207 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866238 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866264 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866321 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866350 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866380 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866407 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866433 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866461 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866485 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866509 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866534 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866559 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866583 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866608 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866635 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866659 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866688 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866713 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866739 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866769 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866796 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866828 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866855 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866922 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866949 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.866976 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867005 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867032 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867063 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867090 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867115 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867142 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867168 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867196 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867222 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867248 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867274 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867375 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867405 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867434 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867462 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867496 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867523 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867548 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867577 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867604 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867629 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867672 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867703 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867733 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867764 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867796 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867826 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867862 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867890 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867919 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867946 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867973 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.867997 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868024 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868062 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868087 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868111 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868139 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868165 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868193 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868221 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868250 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868275 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868341 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868366 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868390 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868416 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868444 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868469 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868495 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868523 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868549 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868577 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868614 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868644 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868670 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868694 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868720 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868744 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868775 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868800 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868823 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde"
volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868848 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868872 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868895 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868921 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868952 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.868979 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869002 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869031 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869054 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869080 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869109 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" 
volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869136 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869161 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869194 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869219 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869249 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869273 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869334 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869363 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869389 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869417 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869444 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" 
volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869470 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869495 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869537 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869563 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869590 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869615 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869643 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869669 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869694 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869719 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869745 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869769 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869796 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869943 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.869990 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870008 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870025 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870042 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870081 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870100 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870123 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870164 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" 
volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870182 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870241 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870256 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870272 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870317 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870333 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870347 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870363 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870402 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870417 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870431 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870445 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870483 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870498 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870512 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870552 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870571 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870587 5037 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870601 5037 reconstruct.go:97] "Volume reconstruction finished" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.870636 5037 reconciler.go:26] "Reconciler: start to sync state" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.892013 5037 manager.go:324] Recovery completed Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.904458 5037 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.906550 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.906804 5037 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.906874 5037 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.906913 5037 kubelet.go:2335] "Starting kubelet main sync loop" Nov 26 14:15:33 crc kubenswrapper[5037]: E1126 14:15:33.906986 5037 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 26 14:15:33 crc kubenswrapper[5037]: W1126 14:15:33.908386 5037 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.69:6443: connect: connection refused Nov 26 14:15:33 crc kubenswrapper[5037]: E1126 14:15:33.908504 5037 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.69:6443: connect: connection refused" logger="UnhandledError" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.908952 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.909167 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.909180 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.910075 5037 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.910094 5037 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.910125 5037 state_mem.go:36] "Initialized new in-memory state store" Nov 26 14:15:33 crc kubenswrapper[5037]: E1126 14:15:33.937961 5037 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.942350 5037 policy_none.go:49] "None policy: Start" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.943817 5037 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 26 14:15:33 crc kubenswrapper[5037]: I1126 14:15:33.943863 5037 state_mem.go:35] "Initializing new in-memory state store" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.003796 5037 manager.go:334] "Starting Device Plugin manager" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.003954 5037 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.003974 5037 server.go:79] "Starting device plugin registration server" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.004474 5037 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.004549 5037 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.004832 5037 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" 
Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.004967 5037 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.004979 5037 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.007326 5037 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.007442 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.008538 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.008573 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.008584 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.008713 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.009975 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.010010 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.010022 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.010152 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.010162 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.010304 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.010353 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.010412 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.011249 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.011271 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.011280 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.011464 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.011583 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.011609 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.011621 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.011875 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.011899 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.011938 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.011957 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.011970 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.013069 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.013094 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.013116 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.013096 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.013146 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.013130 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.013308 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.013361 5037 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.013394 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.014377 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.014398 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.014408 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.014434 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.014481 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.014496 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.014813 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.014857 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.016574 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.016639 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.016658 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:34 crc kubenswrapper[5037]: E1126 14:15:34.018721 5037 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 26 14:15:34 crc kubenswrapper[5037]: E1126 14:15:34.043512 5037 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused" interval="400ms" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.073488 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.073540 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.073570 5037 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.073596 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.073623 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.073649 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.073674 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.073757 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.073834 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.073885 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.073911 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.073959 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" 
(UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.073980 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.074002 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.074050 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.104867 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.106120 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.106162 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.106180 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.106209 5037 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 14:15:34 crc kubenswrapper[5037]: E1126 14:15:34.106631 5037 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.69:6443: connect: connection refused" node="crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175332 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175403 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175453 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: 
\"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175483 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175511 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175533 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175560 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175614 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175640 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175641 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175666 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175693 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175715 5037 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175737 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175740 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175818 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175757 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175858 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175863 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175821 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175757 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175791 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175907 5037 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.175964 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.176000 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.176087 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.176127 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.176138 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.176156 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.176413 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.307127 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.308649 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.308691 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.308703 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.308732 5037 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 
14:15:34 crc kubenswrapper[5037]: E1126 14:15:34.309233 5037 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.69:6443: connect: connection refused" node="crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.339195 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.353505 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.368028 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.384416 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: W1126 14:15:34.387992 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-307583d0faeae9513c96d7aa910c8658dc88f1eac70a527488a9cd554d120db2 WatchSource:0}: Error finding container 307583d0faeae9513c96d7aa910c8658dc88f1eac70a527488a9cd554d120db2: Status 404 returned error can't find the container with id 307583d0faeae9513c96d7aa910c8658dc88f1eac70a527488a9cd554d120db2 Nov 26 14:15:34 crc kubenswrapper[5037]: W1126 14:15:34.389234 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-4066a0b6caf5d49c8f99082d75238016fc440ab60d284d13fa18aba88cde9416 WatchSource:0}: Error finding container 4066a0b6caf5d49c8f99082d75238016fc440ab60d284d13fa18aba88cde9416: Status 404 returned error can't find the container with id 4066a0b6caf5d49c8f99082d75238016fc440ab60d284d13fa18aba88cde9416 Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.390446 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 14:15:34 crc kubenswrapper[5037]: W1126 14:15:34.393940 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-7428edd3e09a8798cd61d9a97d854e5e6cb7777cc867dd63e5de50e0f42562fb WatchSource:0}: Error finding container 7428edd3e09a8798cd61d9a97d854e5e6cb7777cc867dd63e5de50e0f42562fb: Status 404 returned error can't find the container with id 7428edd3e09a8798cd61d9a97d854e5e6cb7777cc867dd63e5de50e0f42562fb Nov 26 14:15:34 crc kubenswrapper[5037]: W1126 14:15:34.411381 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-76d3d72f758ed352c34a4bdc6c48f4bf9677b45d77f831c1a3d2366642706dcd WatchSource:0}: Error finding container 76d3d72f758ed352c34a4bdc6c48f4bf9677b45d77f831c1a3d2366642706dcd: Status 404 returned error can't find the container with id 76d3d72f758ed352c34a4bdc6c48f4bf9677b45d77f831c1a3d2366642706dcd Nov 26 14:15:34 crc kubenswrapper[5037]: W1126 14:15:34.418533 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-fd377de0e29aabbf036cf86d9d84414b212c3e4d15b7b55e7def3686fcab55da WatchSource:0}: Error finding container fd377de0e29aabbf036cf86d9d84414b212c3e4d15b7b55e7def3686fcab55da: Status 404 returned error can't find the container with id fd377de0e29aabbf036cf86d9d84414b212c3e4d15b7b55e7def3686fcab55da Nov 26 14:15:34 crc kubenswrapper[5037]: E1126 14:15:34.444806 5037 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused" interval="800ms" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.710328 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.713177 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.713252 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.713277 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.713382 5037 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 14:15:34 crc kubenswrapper[5037]: E1126 14:15:34.714238 5037 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.69:6443: connect: connection refused" node="crc" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.835941 5037 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.69:6443: connect: connection refused Nov 26 14:15:34 crc kubenswrapper[5037]: W1126 14:15:34.842877 5037 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list 
*v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.69:6443: connect: connection refused Nov 26 14:15:34 crc kubenswrapper[5037]: E1126 14:15:34.842975 5037 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.69:6443: connect: connection refused" logger="UnhandledError" Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.914326 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7428edd3e09a8798cd61d9a97d854e5e6cb7777cc867dd63e5de50e0f42562fb"} Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.916248 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4066a0b6caf5d49c8f99082d75238016fc440ab60d284d13fa18aba88cde9416"} Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.917275 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"307583d0faeae9513c96d7aa910c8658dc88f1eac70a527488a9cd554d120db2"} Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.918368 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"fd377de0e29aabbf036cf86d9d84414b212c3e4d15b7b55e7def3686fcab55da"} Nov 26 14:15:34 crc kubenswrapper[5037]: I1126 14:15:34.919376 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"76d3d72f758ed352c34a4bdc6c48f4bf9677b45d77f831c1a3d2366642706dcd"} Nov 26 14:15:34 crc kubenswrapper[5037]: W1126 14:15:34.934750 5037 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.69:6443: connect: connection refused Nov 26 14:15:34 crc kubenswrapper[5037]: E1126 14:15:34.934876 5037 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.69:6443: connect: connection refused" logger="UnhandledError" Nov 26 14:15:35 crc kubenswrapper[5037]: W1126 14:15:35.041194 5037 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.69:6443: connect: connection refused Nov 26 14:15:35 crc kubenswrapper[5037]: E1126 14:15:35.041416 5037 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 
38.102.83.69:6443: connect: connection refused" logger="UnhandledError" Nov 26 14:15:35 crc kubenswrapper[5037]: E1126 14:15:35.246999 5037 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused" interval="1.6s" Nov 26 14:15:35 crc kubenswrapper[5037]: W1126 14:15:35.306613 5037 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.69:6443: connect: connection refused Nov 26 14:15:35 crc kubenswrapper[5037]: E1126 14:15:35.306739 5037 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.69:6443: connect: connection refused" logger="UnhandledError" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.515462 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.517510 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.517592 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.517629 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.517681 5037 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 14:15:35 crc kubenswrapper[5037]: E1126 14:15:35.518562 5037 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.69:6443: connect: connection refused" node="crc" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.760389 5037 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 26 14:15:35 crc kubenswrapper[5037]: E1126 14:15:35.762424 5037 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.69:6443: connect: connection refused" logger="UnhandledError" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.835871 5037 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.69:6443: connect: connection refused Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.926154 5037 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c" exitCode=0 Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.926342 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c"} Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.926422 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.927981 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.928012 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.928027 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.928477 5037 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="67b6d1d6a2e9cd7c0d4ebf05afd5ac9c1d2f538bea5c0eef57cb232d3f33dacf" exitCode=0 Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.928636 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"67b6d1d6a2e9cd7c0d4ebf05afd5ac9c1d2f538bea5c0eef57cb232d3f33dacf"} Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.928712 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.930633 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.930688 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.930706 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.930788 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.932317 5037 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="e3e27accffafe284222e866c66ab5df9fc47e560e4c68fbf68412ccce6622735" exitCode=0 Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.932360 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"e3e27accffafe284222e866c66ab5df9fc47e560e4c68fbf68412ccce6622735"} Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.932408 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.933196 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.933252 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.933271 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:35 crc 
kubenswrapper[5037]: I1126 14:15:35.933581 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.933632 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.933652 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.935113 5037 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612" exitCode=0 Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.935176 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612"} Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.935377 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.945663 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.945718 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.945732 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.946700 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80"} Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.946934 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e"} Nov 26 14:15:35 crc kubenswrapper[5037]: I1126 14:15:35.947088 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf"} Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.836092 5037 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.69:6443: connect: connection refused Nov 26 14:15:36 crc kubenswrapper[5037]: E1126 14:15:36.848355 5037 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused" interval="3.2s" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.951511 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57"} Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.951659 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.952720 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.952763 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.952774 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.958818 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8"} Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.958889 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a"} Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.958904 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a"} Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.963223 5037 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="82e3fcf00a2a6d4ae590e298292ddc609aa9f542f3fb885569030a123f9b50ce" exitCode=0 Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.963259 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"82e3fcf00a2a6d4ae590e298292ddc609aa9f542f3fb885569030a123f9b50ce"} Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.963442 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.964598 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.964621 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.964632 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.965834 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"9311af562d977275bf004eeccea8add66957ef55996ac9665e946adb4c8bfe1d"} Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.965889 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller 
attach/detach" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.966803 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.966849 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.966862 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.970776 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"30b88b73e6299d048160f3e7b1698df43e63aa1dc98e86f8472bc47994019f6d"} Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.970848 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.970853 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e75e4e0ccd9d317e18bd7f97c06cdc5d2bcb53c2de228f3619c894d964304770"} Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.971124 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b728895e4ecd26bbe5587512878f5dfb72643d07acc38dccecdf55d9369d1811"} Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.971732 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.971776 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:36 crc kubenswrapper[5037]: I1126 14:15:36.971790 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.118898 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.120642 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.120708 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.120725 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.120783 5037 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 14:15:37 crc kubenswrapper[5037]: E1126 14:15:37.121797 5037 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.69:6443: connect: connection refused" node="crc" Nov 26 14:15:37 crc kubenswrapper[5037]: W1126 14:15:37.205010 5037 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 
38.102.83.69:6443: connect: connection refused Nov 26 14:15:37 crc kubenswrapper[5037]: E1126 14:15:37.205727 5037 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.69:6443: connect: connection refused" logger="UnhandledError" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.977525 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e"} Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.977588 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4"} Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.977641 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.979602 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.979657 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.979676 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.982232 5037 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="a42c21de17d8f05bfc2456a51494a671216b04d7ed247d19c1f9a8a2ac0a958b" exitCode=0 Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.982343 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"a42c21de17d8f05bfc2456a51494a671216b04d7ed247d19c1f9a8a2ac0a958b"} Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.982413 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.982421 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.982424 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.982367 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.982636 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.984488 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.984552 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 
14:15:37.984578 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.984590 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.984674 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.984688 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.984704 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.984735 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.984748 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.984705 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.984805 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:37 crc kubenswrapper[5037]: I1126 14:15:37.984834 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:38 crc kubenswrapper[5037]: I1126 14:15:38.993442 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"795402f50bec7d732c02e0739a8d738b7bfbdde20c52110f9bfb7d1beb96dc92"} Nov 26 14:15:38 crc kubenswrapper[5037]: I1126 14:15:38.993504 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"1181aac58ebc7d29267d30e539673e982d99f6d174ee886344a9bfdc334d3070"} Nov 26 14:15:38 crc kubenswrapper[5037]: I1126 14:15:38.993520 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f20dcb2755cf3b7e8dea28b899a6f8178eb1beeb9230e5e9c0306f6ac7ea122e"} Nov 26 14:15:38 crc kubenswrapper[5037]: I1126 14:15:38.993590 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:38 crc kubenswrapper[5037]: I1126 14:15:38.994123 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:38 crc kubenswrapper[5037]: I1126 14:15:38.996447 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:38 crc kubenswrapper[5037]: I1126 14:15:38.999453 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:38 crc kubenswrapper[5037]: I1126 14:15:38.999510 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:38 crc kubenswrapper[5037]: I1126 14:15:38.999531 5037 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 14:15:39 crc kubenswrapper[5037]: I1126 14:15:39.001513 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:39 crc kubenswrapper[5037]: I1126 14:15:39.001563 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:39 crc kubenswrapper[5037]: I1126 14:15:39.001581 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:39 crc kubenswrapper[5037]: I1126 14:15:39.315592 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:39 crc kubenswrapper[5037]: I1126 14:15:39.315897 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:39 crc kubenswrapper[5037]: I1126 14:15:39.318239 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:39 crc kubenswrapper[5037]: I1126 14:15:39.318414 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:39 crc kubenswrapper[5037]: I1126 14:15:39.318449 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.006319 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"336d7e61e5355bab6896e75dab7d8699f5aa63348b880b602a5692d3e55f8bb6"} Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.006430 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"aea238f78fa789a938ad54a67d44502b18651b50b0e998fc4fd9836b5a4c22c2"} Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.006493 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.006493 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.008519 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.008583 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.008606 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.009477 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.009558 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.009585 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.102426 5037 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates 
Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.155210 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.155537 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.157192 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.157263 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.157326 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.322233 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.324399 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.324468 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.324490 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.324535 5037 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.783835 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 26 14:15:40 crc kubenswrapper[5037]: I1126 14:15:40.892329 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:41 crc kubenswrapper[5037]: I1126 14:15:41.009495 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:41 crc kubenswrapper[5037]: I1126 14:15:41.009575 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:41 crc kubenswrapper[5037]: I1126 14:15:41.010924 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:41 crc kubenswrapper[5037]: I1126 14:15:41.010952 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:41 crc kubenswrapper[5037]: I1126 14:15:41.010962 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:41 crc kubenswrapper[5037]: I1126 14:15:41.011398 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:41 crc kubenswrapper[5037]: I1126 14:15:41.011457 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:41 crc kubenswrapper[5037]: I1126 14:15:41.011488 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:41 crc kubenswrapper[5037]: I1126 14:15:41.371909 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-etcd/etcd-crc" Nov 26 14:15:41 crc kubenswrapper[5037]: I1126 14:15:41.786602 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:42 crc kubenswrapper[5037]: I1126 14:15:42.011985 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:42 crc kubenswrapper[5037]: I1126 14:15:42.012037 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:42 crc kubenswrapper[5037]: I1126 14:15:42.013638 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:42 crc kubenswrapper[5037]: I1126 14:15:42.013710 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:42 crc kubenswrapper[5037]: I1126 14:15:42.013734 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:42 crc kubenswrapper[5037]: I1126 14:15:42.013831 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:42 crc kubenswrapper[5037]: I1126 14:15:42.013929 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:42 crc kubenswrapper[5037]: I1126 14:15:42.013949 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:43 crc kubenswrapper[5037]: I1126 14:15:43.014870 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:43 crc kubenswrapper[5037]: I1126 14:15:43.016108 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:43 crc kubenswrapper[5037]: I1126 14:15:43.016157 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:43 crc kubenswrapper[5037]: I1126 14:15:43.016166 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:44 crc kubenswrapper[5037]: E1126 14:15:44.020007 5037 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 26 14:15:45 crc kubenswrapper[5037]: I1126 14:15:45.547598 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:45 crc kubenswrapper[5037]: I1126 14:15:45.547884 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:45 crc kubenswrapper[5037]: I1126 14:15:45.549668 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:45 crc kubenswrapper[5037]: I1126 14:15:45.549734 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:45 crc kubenswrapper[5037]: I1126 14:15:45.549759 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:45 crc kubenswrapper[5037]: I1126 14:15:45.553418 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 
14:15:46 crc kubenswrapper[5037]: I1126 14:15:46.030797 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:46 crc kubenswrapper[5037]: I1126 14:15:46.036764 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:46 crc kubenswrapper[5037]: I1126 14:15:46.036865 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:46 crc kubenswrapper[5037]: I1126 14:15:46.036887 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:46 crc kubenswrapper[5037]: I1126 14:15:46.037709 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:46 crc kubenswrapper[5037]: I1126 14:15:46.129796 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:47 crc kubenswrapper[5037]: I1126 14:15:47.033708 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:47 crc kubenswrapper[5037]: I1126 14:15:47.035014 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:47 crc kubenswrapper[5037]: I1126 14:15:47.035052 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:47 crc kubenswrapper[5037]: I1126 14:15:47.035063 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:47 crc kubenswrapper[5037]: W1126 14:15:47.518204 5037 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 26 14:15:47 crc kubenswrapper[5037]: I1126 14:15:47.518367 5037 trace.go:236] Trace[538978559]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 14:15:37.516) (total time: 10001ms): Nov 26 14:15:47 crc kubenswrapper[5037]: Trace[538978559]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (14:15:47.518) Nov 26 14:15:47 crc kubenswrapper[5037]: Trace[538978559]: [10.00146317s] [10.00146317s] END Nov 26 14:15:47 crc kubenswrapper[5037]: E1126 14:15:47.518405 5037 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 26 14:15:47 crc kubenswrapper[5037]: W1126 14:15:47.788645 5037 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 26 14:15:47 crc kubenswrapper[5037]: I1126 14:15:47.788785 5037 trace.go:236] Trace[866315758]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 14:15:37.786) (total time: 10001ms): Nov 26 14:15:47 crc 
kubenswrapper[5037]: Trace[866315758]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (14:15:47.788) Nov 26 14:15:47 crc kubenswrapper[5037]: Trace[866315758]: [10.001874341s] [10.001874341s] END Nov 26 14:15:47 crc kubenswrapper[5037]: E1126 14:15:47.788820 5037 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 26 14:15:47 crc kubenswrapper[5037]: I1126 14:15:47.836735 5037 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 26 14:15:47 crc kubenswrapper[5037]: W1126 14:15:47.877365 5037 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 26 14:15:47 crc kubenswrapper[5037]: I1126 14:15:47.877777 5037 trace.go:236] Trace[434580797]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 14:15:37.875) (total time: 10002ms): Nov 26 14:15:47 crc kubenswrapper[5037]: Trace[434580797]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (14:15:47.877) Nov 26 14:15:47 crc kubenswrapper[5037]: Trace[434580797]: [10.002223347s] [10.002223347s] END Nov 26 14:15:47 crc kubenswrapper[5037]: E1126 14:15:47.877959 5037 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 26 14:15:47 crc kubenswrapper[5037]: E1126 14:15:47.985520 5037 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": net/http: TLS handshake timeout" event="&Event{ObjectMeta:{crc.187b941d95a98343 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 14:15:33.831557955 +0000 UTC m=+0.628328199,LastTimestamp:2025-11-26 14:15:33.831557955 +0000 UTC m=+0.628328199,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 14:15:48 crc kubenswrapper[5037]: I1126 14:15:48.036839 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:48 crc kubenswrapper[5037]: I1126 14:15:48.038155 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:48 crc kubenswrapper[5037]: I1126 14:15:48.038207 5037 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:48 crc kubenswrapper[5037]: I1126 14:15:48.038220 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:48 crc kubenswrapper[5037]: I1126 14:15:48.401463 5037 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 26 14:15:48 crc kubenswrapper[5037]: I1126 14:15:48.401545 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 26 14:15:48 crc kubenswrapper[5037]: I1126 14:15:48.407011 5037 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 26 14:15:48 crc kubenswrapper[5037]: I1126 14:15:48.407098 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 26 14:15:49 crc kubenswrapper[5037]: I1126 14:15:49.130586 5037 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 26 14:15:49 crc kubenswrapper[5037]: I1126 14:15:49.130691 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 26 14:15:50 crc kubenswrapper[5037]: I1126 14:15:50.814511 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 26 14:15:50 crc kubenswrapper[5037]: I1126 14:15:50.815389 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:50 crc kubenswrapper[5037]: I1126 14:15:50.816709 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:50 crc kubenswrapper[5037]: I1126 14:15:50.816815 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:50 crc kubenswrapper[5037]: I1126 14:15:50.816889 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:50 crc kubenswrapper[5037]: I1126 14:15:50.829932 
5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 26 14:15:50 crc kubenswrapper[5037]: I1126 14:15:50.898926 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:50 crc kubenswrapper[5037]: I1126 14:15:50.899141 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:50 crc kubenswrapper[5037]: I1126 14:15:50.900638 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:50 crc kubenswrapper[5037]: I1126 14:15:50.900815 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:50 crc kubenswrapper[5037]: I1126 14:15:50.900925 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:50 crc kubenswrapper[5037]: I1126 14:15:50.903107 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:51 crc kubenswrapper[5037]: I1126 14:15:51.045541 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:51 crc kubenswrapper[5037]: I1126 14:15:51.045680 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:51 crc kubenswrapper[5037]: I1126 14:15:51.046542 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:51 crc kubenswrapper[5037]: I1126 14:15:51.046639 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:51 crc kubenswrapper[5037]: I1126 14:15:51.046693 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:51 crc kubenswrapper[5037]: I1126 14:15:51.047007 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:51 crc kubenswrapper[5037]: I1126 14:15:51.047035 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:51 crc kubenswrapper[5037]: I1126 14:15:51.047048 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:52 crc kubenswrapper[5037]: I1126 14:15:52.023561 5037 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 26 14:15:52 crc kubenswrapper[5037]: I1126 14:15:52.169895 5037 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 26 14:15:53 crc kubenswrapper[5037]: E1126 14:15:53.395566 5037 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.397712 5037 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.399537 5037 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 26 14:15:53 crc kubenswrapper[5037]: E1126 14:15:53.399683 5037 kubelet_node_status.go:99] "Unable 
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.406028 5037 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.429862 5037 csr.go:261] certificate signing request csr-ct4t8 is approved, waiting to be issued
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.434933 5037 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:59596->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.435136 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:59596->192.168.126.11:17697: read: connection reset by peer"
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.435691 5037 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.435794 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.439781 5037 csr.go:257] certificate signing request csr-ct4t8 is issued
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.675154 5037 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials"
Nov 26 14:15:53 crc kubenswrapper[5037]: W1126 14:15:53.675753 5037 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.Node ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.775676 5037 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.836032 5037 apiserver.go:52] "Watching apiserver"
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.842232 5037 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.842616 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf"]
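[Editor's note: not part of the captured log] The entries above follow the standard klog line format ("I1126 14:15:53.842616 5037 file.go:123] message") wrapped in a systemd journal prefix. A minimal sketch, assuming that format, for turning such lines into structured records; the field names are illustrative, not from any OpenShift tooling.

import re

# Matches: "kubenswrapper[PID]: <sev><MMDD> <time> <tid> <file:line>] <msg>"
KLOG = re.compile(
    r'kubenswrapper\[(?P<pid>\d+)\]: '
    r'(?P<sev>[IWEF])(?P<mmdd>\d{4}) (?P<time>\d{2}:\d{2}:\d{2}\.\d+) '
    r'\d+ (?P<src>[\w./_-]+:\d+)\] (?P<msg>.*)$')

def parse(line: str):
    m = KLOG.search(line)
    return m.groupdict() if m else None

rec = parse('Nov 26 14:15:53 crc kubenswrapper[5037]: '
            'E1126 14:15:53.399683 5037 kubelet_node_status.go:99] '
            '"Unable to register node with API server" node="crc"')
print(rec["sev"], rec["src"], rec["msg"])

Filtering on sev in {"E", "W"} is a quick way to pull the failures (lease renewal, node registration, probes) out of the flood of informational lines in this capture.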
pods=["openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf"] Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.843207 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.843473 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.843607 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:15:53 crc kubenswrapper[5037]: E1126 14:15:53.843591 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.843709 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 14:15:53 crc kubenswrapper[5037]: E1126 14:15:53.843733 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.843913 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.844047 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:15:53 crc kubenswrapper[5037]: E1126 14:15:53.844478 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.845586 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.846918 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.847108 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.847270 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.847870 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.849072 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.849344 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.849394 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.851591 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.873074 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.873074 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.885003 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.896932 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.912886 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.924532 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.937163 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.937842 5037 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
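[Editor's note: not part of the captured log] Each "Failed to update status for pod" entry above embeds the attempted JSON strategic-merge patch, escaped inside err="failed to patch status \"...\"". A minimal sketch for recovering that patch as a dict from one such journal line; the double round of unescaping reflects how the patch is quoted twice in this capture, and the function name is illustrative.

import json
import re

def extract_patch(line: str) -> dict:
    # The patch sits between 'failed to patch status \"' and '\" for pod'.
    m = re.search(r'failed to patch status \\"(.*?)\\" for pod', line)
    if not m:
        raise ValueError("no status patch in this line")
    s = m.group(1)
    # Quoted twice in this capture (err="..." wrapping a \"...\" string),
    # so two rounds of unescaping are needed before the JSON parses.
    for _ in range(2):
        s = s.encode().decode("unicode_escape")
    return json.loads(s)

Applied to the entries above, extract_patch(...)["status"]["containerStatuses"] exposes the restartCount, waiting reason (ContainerCreating), and volumeMounts fields that are hard to read in escaped form.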
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.949191 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.960668 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.972110 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.983004 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
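[Editor's note: not part of the captured log] Every status patch in this stretch fails the same way: the pod.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743 refuses connections (its serving pod, network-node-identity-vrzqb, is itself still ContainerCreating). A quick TCP reachability probe of that endpoint, with host and port taken from the log, might look like this sketch.

import socket

def tcp_open(host: str, port: int, timeout: float = 2.0) -> bool:
    # Returns True if a TCP connection can be established within the timeout.
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

print("webhook reachable:", tcp_open("127.0.0.1", 9743))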
Nov 26 14:15:53 crc kubenswrapper[5037]: I1126 14:15:53.997700 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003058 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003107 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003144 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003171 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003193 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003217 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003240 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003262 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003305 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003333 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003355 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003377 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003398 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003416 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003433 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003475 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003506 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003527 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003544 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003563 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003583 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003604 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003645 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003681 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003705 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003729 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003779 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003822 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003849 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003872 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003893 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003914 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003934 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003957 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.003982 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004003 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod 
\"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004024 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004044 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004066 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004089 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004116 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004140 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004164 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004204 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004244 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004269 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004313 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004335 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004374 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004358 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004394 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004420 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004449 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004472 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004470 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004495 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004518 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004542 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004562 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004579 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004587 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004614 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004636 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004658 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004680 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004722 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004731 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004770 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004775 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004840 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004868 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004897 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004918 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004985 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.004994 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005018 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005046 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005072 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005101 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005127 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005152 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005177 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005202 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005224 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005247 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod 
\"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005261 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005269 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005346 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005378 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005409 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005435 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005462 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005492 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005497 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005521 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005553 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005557 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005581 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005612 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005641 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005667 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005695 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005723 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005724 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded 
for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005750 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005781 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005809 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005835 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005861 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005886 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005912 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005938 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005964 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" 
(UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.005988 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006011 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006032 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006058 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006077 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006087 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006111 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006135 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006162 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006184 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006189 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006226 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006248 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006268 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006320 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006460 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006501 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006554 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006887 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006908 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006925 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006955 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006973 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.006991 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007009 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007026 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007041 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007063 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod 
\"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007082 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007101 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007119 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007138 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007155 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007171 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007187 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007205 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007224 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007243 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007260 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007278 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007307 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007326 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007343 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007364 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007382 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007399 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007418 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007434 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007477 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007495 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007619 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.007776 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.008060 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.008075 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.008364 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.008388 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.008768 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.008787 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.008807 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.008851 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.008998 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.009035 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.009066 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.009087 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.009113 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.009136 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012590 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012621 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012654 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012673 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012691 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012710 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012726 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012743 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012761 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012781 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012799 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012818 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012836 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012853 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012871 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012888 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012904 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012921 5037 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012938 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012953 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012973 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012991 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013009 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013025 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013042 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013058 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013075 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013091 5037 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013108 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013127 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013144 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013160 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013177 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013196 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013214 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013258 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013295 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: 
\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013317 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013335 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013357 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013376 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013398 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013417 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013436 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013458 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013479 5037 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013500 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013711 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013728 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013800 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013820 5037 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013834 5037 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013844 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013856 5037 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013868 5037 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013878 5037 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on 
node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013888 5037 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013898 5037 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013909 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013919 5037 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013928 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013938 5037 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013947 5037 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013957 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013967 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013977 5037 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.013996 5037 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.014006 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.014015 5037 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc 
kubenswrapper[5037]: I1126 14:15:54.014275 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.008362 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.008832 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.008907 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.009170 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.009455 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.009656 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.009798 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.010066 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.010609 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.010609 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.010852 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.010891 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.010964 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.011066 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.011268 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.011529 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.011768 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012040 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012135 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012167 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.012318 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.018257 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.018696 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.018880 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.019053 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.019222 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.019429 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.020391 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.021004 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.048008 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.021259 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.021435 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.021448 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.021853 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.021913 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.028535 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.028875 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.029112 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.029337 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.038991 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.039314 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.040617 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.041193 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.041540 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.045627 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.046003 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.046234 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.046558 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.046585 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.046865 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.046999 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.047057 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.047343 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.047433 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.048089 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:15:54.548060253 +0000 UTC m=+21.344830437 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.048353 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.048375 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.048663 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.048890 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). 
InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.049317 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.049372 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.049434 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.049792 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.049786 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.049826 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.050075 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.050126 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.050143 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.050338 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.050232 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.050466 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.050520 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.050606 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.050932 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.051072 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.051138 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.051356 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.046070 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.051666 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.052039 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.052124 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.052505 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.052520 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.052631 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.052636 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.052912 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.053027 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.053090 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.053112 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.053123 5037 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.053274 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.053504 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.053527 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.053615 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.053648 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.053851 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.054036 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.054220 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.054274 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.055525 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.055628 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.055704 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.055740 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:15:54.555715511 +0000 UTC m=+21.352485865 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.055737 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.056045 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.056089 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.056549 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.056547 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.057232 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.057404 5037 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.058824 5037 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.070836 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:15:54.57080834 +0000 UTC m=+21.367578514 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.074272 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.074557 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.074799 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.075111 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.075940 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.076059 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.076095 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.075068 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.076994 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.077025 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.077411 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.077487 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.078205 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.078480 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.078757 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.078800 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.078883 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.078903 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.078976 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.079045 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.079274 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.079551 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.079632 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.079833 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.080599 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.080659 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.080919 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.081488 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.081528 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.081896 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.081950 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). 
InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.082675 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.082997 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.083034 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.084158 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.084732 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.084762 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.084801 5037 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.084908 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 14:15:54.584884617 +0000 UTC m=+21.381654801 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.085342 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.086058 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.086422 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.086616 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.086963 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.088284 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.088588 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.088695 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.088866 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.090571 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.092865 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.092925 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.092954 5037 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.093061 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 14:15:54.593023275 +0000 UTC m=+21.389793459 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.093980 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.095128 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.095589 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.095743 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.099401 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.097519 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.104654 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.105725 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.107732 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.108131 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.110216 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.110247 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.110727 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.110908 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.110931 5037 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e" exitCode=255 Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.110981 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e"} Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.111352 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.111532 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.111708 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.112467 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.112840 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.113100 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.113105 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.113271 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.113430 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.113551 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.113587 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114501 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114542 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114611 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114625 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114638 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114649 5037 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114661 5037 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114672 5037 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114684 5037 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114696 5037 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114708 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114719 5037 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc 
kubenswrapper[5037]: I1126 14:15:54.114729 5037 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114740 5037 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114752 5037 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114764 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114775 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114814 5037 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114826 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114838 5037 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114850 5037 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114861 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114873 5037 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114885 5037 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114897 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 
14:15:54.114910 5037 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114923 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114935 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114947 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114959 5037 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114973 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114985 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.114997 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115008 5037 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115019 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115030 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115041 5037 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115051 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115062 5037 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115074 5037 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115084 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115095 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115106 5037 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115117 5037 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115128 5037 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115141 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115153 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115165 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115177 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115189 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115200 5037 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 
14:15:54.115211 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115222 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115233 5037 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115243 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115255 5037 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115267 5037 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115278 5037 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115312 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115332 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115351 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115367 5037 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115380 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115390 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 
14:15:54.115401 5037 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115411 5037 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115422 5037 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115433 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115443 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115453 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115462 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115474 5037 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115484 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115494 5037 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115506 5037 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115516 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115528 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115541 5037 reconciler_common.go:293] "Volume 
detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115554 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115564 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115576 5037 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115587 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115598 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115611 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115716 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115735 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115711 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115777 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115812 5037 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115847 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115936 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115952 5037 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115966 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115981 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115999 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116015 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116027 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116038 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116050 5037 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116061 5037 reconciler_common.go:293] "Volume detached for 
volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116072 5037 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116083 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116095 5037 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116106 5037 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116116 5037 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116128 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116138 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116150 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116162 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116173 5037 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116184 5037 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116196 5037 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116207 5037 reconciler_common.go:293] "Volume 
detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116217 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116232 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116243 5037 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116256 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116268 5037 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116757 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.115836 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.116962 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.118948 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.119170 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120618 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120670 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120682 5037 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120696 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: 
\"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120706 5037 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120715 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120725 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120738 5037 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120751 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120765 5037 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120778 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120790 5037 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120804 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120818 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120831 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120843 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120857 5037 reconciler_common.go:293] "Volume detached for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120870 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120881 5037 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120905 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120915 5037 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120924 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120933 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120942 5037 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120952 5037 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120961 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120969 5037 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120980 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120989 5037 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.120998 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121007 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121018 5037 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121027 5037 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121037 5037 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121046 5037 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121055 5037 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121063 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121072 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121081 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121090 5037 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121099 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121109 5037 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121119 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: 
\"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121130 5037 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121141 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121151 5037 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121169 5037 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121179 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121188 5037 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121198 5037 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121207 5037 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.121217 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.130797 5037 scope.go:117] "RemoveContainer" containerID="afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.133351 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.133480 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.146757 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.148670 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.151109 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.152308 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.163497 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.167172 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.170377 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.170729 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.179160 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.183049 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 14:15:54 crc kubenswrapper[5037]: W1126 14:15:54.191709 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-08d29bdc2a3f203fe614d9089b969a0b139edb1a32ddc333929bb2c29612384e WatchSource:0}: Error finding container 08d29bdc2a3f203fe614d9089b969a0b139edb1a32ddc333929bb2c29612384e: Status 404 returned error can't find the container with id 08d29bdc2a3f203fe614d9089b969a0b139edb1a32ddc333929bb2c29612384e Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.196557 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.209405 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.221899 5037 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.221941 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.221958 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.221973 5037 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.221987 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.222018 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.222031 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.222045 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.441246 5037 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-11-26 14:10:53 +0000 UTC, rotation deadline is 2026-09-05 10:45:28.056051947 +0000 UTC Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.441330 5037 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6788h29m33.614724105s for next certificate 
rotation Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.625710 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.625776 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.625809 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.625832 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.625878 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.625980 5037 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.626002 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.626035 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:15:55.626012631 +0000 UTC m=+22.422782805 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.626041 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.626057 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:15:55.626048522 +0000 UTC m=+22.422818706 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.626061 5037 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.626057 5037 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.626070 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.626196 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.626212 5037 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.626106 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 14:15:55.626094713 +0000 UTC m=+22.422864897 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.626265 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:15:55.626253866 +0000 UTC m=+22.423024050 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:15:54 crc kubenswrapper[5037]: E1126 14:15:54.626310 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 14:15:55.626302817 +0000 UTC m=+22.423073001 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.991905 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-8tjq6"] Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.992307 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-8tjq6" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.996054 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.996226 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 26 14:15:54 crc kubenswrapper[5037]: I1126 14:15:54.996373 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.015231 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.039908 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.060546 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.101736 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26
T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.114222 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c"} Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.114269 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab"} Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.114280 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"08d29bdc2a3f203fe614d9089b969a0b139edb1a32ddc333929bb2c29612384e"} Nov 26 14:15:55 crc kubenswrapper[5037]: 
I1126 14:15:55.116409 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"1a939c321c37e297f74956ffe4bc13081d04cf44fc3d12ffe7714dc66613f455"} Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.117569 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58"} Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.117602 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"3833786311a330df4c31c32b2ad3a6cef6cb42c9e4d7b9eedf914ff932f6c334"} Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.119141 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.121313 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375"} Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.121558 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.129622 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjhs8\" (UniqueName: \"kubernetes.io/projected/8f4a637d-4b3f-4289-a84c-cd2559430a0e-kube-api-access-mjhs8\") pod \"node-resolver-8tjq6\" (UID: \"8f4a637d-4b3f-4289-a84c-cd2559430a0e\") " pod="openshift-dns/node-resolver-8tjq6" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.129669 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8f4a637d-4b3f-4289-a84c-cd2559430a0e-hosts-file\") pod \"node-resolver-8tjq6\" (UID: \"8f4a637d-4b3f-4289-a84c-cd2559430a0e\") " pod="openshift-dns/node-resolver-8tjq6" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.145261 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.192895 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with 
unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.224311 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.230238 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8f4a637d-4b3f-4289-a84c-cd2559430a0e-hosts-file\") pod \"node-resolver-8tjq6\" (UID: \"8f4a637d-4b3f-4289-a84c-cd2559430a0e\") " pod="openshift-dns/node-resolver-8tjq6" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.230741 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjhs8\" (UniqueName: \"kubernetes.io/projected/8f4a637d-4b3f-4289-a84c-cd2559430a0e-kube-api-access-mjhs8\") pod \"node-resolver-8tjq6\" (UID: \"8f4a637d-4b3f-4289-a84c-cd2559430a0e\") " pod="openshift-dns/node-resolver-8tjq6" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.234096 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8f4a637d-4b3f-4289-a84c-cd2559430a0e-hosts-file\") pod \"node-resolver-8tjq6\" (UID: \"8f4a637d-4b3f-4289-a84c-cd2559430a0e\") " pod="openshift-dns/node-resolver-8tjq6" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.247882 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.251325 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjhs8\" (UniqueName: \"kubernetes.io/projected/8f4a637d-4b3f-4289-a84c-cd2559430a0e-kube-api-access-mjhs8\") pod \"node-resolver-8tjq6\" (UID: \"8f4a637d-4b3f-4289-a84c-cd2559430a0e\") " pod="openshift-dns/node-resolver-8tjq6" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.267712 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 
secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.306370 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.308565 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-8tjq6" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.317764 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: W1126 14:15:55.327409 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f4a637d_4b3f_4289_a84c_cd2559430a0e.slice/crio-b1968aaf92d40c9e24ce231e423600800214b27435637414f1bfbdb8c3695b64 WatchSource:0}: Error finding container b1968aaf92d40c9e24ce231e423600800214b27435637414f1bfbdb8c3695b64: Status 404 returned error can't find the container with id b1968aaf92d40c9e24ce231e423600800214b27435637414f1bfbdb8c3695b64 Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.332093 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.354485 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.375821 5037 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.401444 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.417499 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.633652 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.633747 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.633782 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.633809 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.633841 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 
26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.633968 5037 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.633999 5037 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.634035 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:15:57.634015811 +0000 UTC m=+24.430785995 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.634035 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.634078 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.634098 5037 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.634116 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:15:57.634091612 +0000 UTC m=+24.430861816 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.634055 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.634159 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.634172 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-11-26 14:15:57.634150494 +0000 UTC m=+24.430920698 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.634180 5037 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.634230 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 14:15:57.634217635 +0000 UTC m=+24.430988019 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.634270 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:15:57.634259136 +0000 UTC m=+24.431029330 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.783169 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-hn6x5"]
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.783806 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-hn6x5"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.787777 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-8jk2d"]
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.788225 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.788216 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-lxpjp"]
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.788780 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-lxpjp"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.790851 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fdhhj"]
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.790935 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.791092 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.791771 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.792662 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.797817 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Nov 26 14:15:55 crc kubenswrapper[5037]: W1126 14:15:55.797856 5037 reflector.go:561] object-"openshift-machine-config-operator"/"proxy-tls": failed to list *v1.Secret: secrets "proxy-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object
Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.797902 5037 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"proxy-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"proxy-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.801589 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.802118 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.803914 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.807400 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.808101 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.808275 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.808357 5037
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.808742 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.808750 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.808910 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.817721 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.818925 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.819044 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.819087 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.819124 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.837319 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 
secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.870453 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.890661 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.907918 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.908093 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.908180 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.908229 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.908355 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:15:55 crc kubenswrapper[5037]: E1126 14:15:55.908422 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.912972 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.913607 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.914018 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.914546 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: 
I1126 14:15:55.915164 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.916450 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.916992 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.918013 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.918579 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.919657 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.920177 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.921079 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.921771 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.922654 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.923163 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.923689 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.924665 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.925357 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.926349 5037 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.927024 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.927785 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.929080 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.929815 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.930116 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.930393 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.931955 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" 
path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.933581 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.934886 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.935698 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.936691 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-cnibin\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.936733 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-log-socket\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.936747 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.936783 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-os-release\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.936809 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-slash\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.936874 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.936931 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-ovnkube-config\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937000 5037 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb-mcd-auth-proxy-config\") pod \"machine-config-daemon-8jk2d\" (UID: \"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\") " pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937044 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/490e7d88-ae7f-45f9-ab12-598c33e3bc69-cni-binary-copy\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937069 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-ovnkube-script-lib\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937091 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb-rootfs\") pod \"machine-config-daemon-8jk2d\" (UID: \"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\") " pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937113 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b3393ec4-cc72-499a-8557-ec6ca329a142-tuning-conf-dir\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937139 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-system-cni-dir\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937161 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-multus-cni-dir\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937185 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-var-lib-openvswitch\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937212 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-node-log\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937251 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/454ee6da-70e5-4d30-89e5-19a35123a278-ovn-node-metrics-cert\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937524 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-run-ovn-kubernetes\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937590 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937592 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lw2l\" (UniqueName: \"kubernetes.io/projected/b3393ec4-cc72-499a-8557-ec6ca329a142-kube-api-access-2lw2l\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937633 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-env-overrides\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937689 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-run-netns\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937710 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/490e7d88-ae7f-45f9-ab12-598c33e3bc69-multus-daemon-config\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937756 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-cni-netd\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937825 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-multus-socket-dir-parent\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " 
pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937852 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-run-k8s-cni-cncf-io\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937902 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-systemd-units\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.937922 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-run-netns\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938010 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-var-lib-cni-bin\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938047 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-multus-conf-dir\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938110 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sbcs\" (UniqueName: \"kubernetes.io/projected/490e7d88-ae7f-45f9-ab12-598c33e3bc69-kube-api-access-5sbcs\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938142 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-systemd\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938176 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938204 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-etc-openvswitch\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938230 5037 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb-proxy-tls\") pod \"machine-config-daemon-8jk2d\" (UID: \"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\") " pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938688 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-cni-bin\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938726 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b3393ec4-cc72-499a-8557-ec6ca329a142-cnibin\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938760 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-run-multus-certs\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938780 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-etc-kubernetes\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938803 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-ovn\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938830 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9v8cx\" (UniqueName: \"kubernetes.io/projected/8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb-kube-api-access-9v8cx\") pod \"machine-config-daemon-8jk2d\" (UID: \"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\") " pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938925 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b3393ec4-cc72-499a-8557-ec6ca329a142-system-cni-dir\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938972 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b3393ec4-cc72-499a-8557-ec6ca329a142-os-release\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: 
\"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.938998 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b3393ec4-cc72-499a-8557-ec6ca329a142-cni-binary-copy\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.939021 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-hostroot\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.939050 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhgm2\" (UniqueName: \"kubernetes.io/projected/454ee6da-70e5-4d30-89e5-19a35123a278-kube-api-access-mhgm2\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.939071 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/b3393ec4-cc72-499a-8557-ec6ca329a142-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.939090 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-var-lib-cni-multus\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.939116 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-var-lib-kubelet\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.939145 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-kubelet\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.939167 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-openvswitch\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.939467 5037 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" 
path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.939617 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.941446 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.942394 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.942809 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.944723 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.945957 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.946474 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.947030 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.953807 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.954922 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.955502 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.956123 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.956864 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.957621 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.958199 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.958384 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.958873 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.961579 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" 
path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.962609 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.963600 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.964139 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.965141 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.965900 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.966650 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.967855 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.969971 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:55 crc kubenswrapper[5037]: I1126 14:15:55.992187 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:55Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.004248 5037 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.019191 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.037576 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.039965 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-env-overrides\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040008 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/490e7d88-ae7f-45f9-ab12-598c33e3bc69-multus-daemon-config\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040025 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-cni-netd\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040044 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-run-netns\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040061 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-systemd-units\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040077 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-run-netns\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040092 5037 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-multus-socket-dir-parent\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040107 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-run-k8s-cni-cncf-io\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040122 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-systemd\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040146 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-etc-openvswitch\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040149 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-cni-netd\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040168 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-var-lib-cni-bin\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040183 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-multus-conf-dir\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040240 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-run-netns\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040338 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-run-k8s-cni-cncf-io\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040349 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-multus-conf-dir\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040373 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-systemd\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040353 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-run-netns\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040395 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sbcs\" (UniqueName: \"kubernetes.io/projected/490e7d88-ae7f-45f9-ab12-598c33e3bc69-kube-api-access-5sbcs\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040406 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-etc-openvswitch\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040416 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb-proxy-tls\") pod \"machine-config-daemon-8jk2d\" (UID: \"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\") " pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040427 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-systemd-units\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040451 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-var-lib-cni-bin\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040481 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-cni-bin\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040578 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-cni-bin\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040588 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-etc-kubernetes\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040625 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-ovn\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040693 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b3393ec4-cc72-499a-8557-ec6ca329a142-cnibin\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040734 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-run-multus-certs\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040758 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b3393ec4-cc72-499a-8557-ec6ca329a142-system-cni-dir\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040760 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-etc-kubernetes\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040775 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b3393ec4-cc72-499a-8557-ec6ca329a142-os-release\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040814 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b3393ec4-cc72-499a-8557-ec6ca329a142-system-cni-dir\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040822 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b3393ec4-cc72-499a-8557-ec6ca329a142-cnibin\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: 
I1126 14:15:56.040845 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9v8cx\" (UniqueName: \"kubernetes.io/projected/8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb-kube-api-access-9v8cx\") pod \"machine-config-daemon-8jk2d\" (UID: \"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\") " pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040873 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhgm2\" (UniqueName: \"kubernetes.io/projected/454ee6da-70e5-4d30-89e5-19a35123a278-kube-api-access-mhgm2\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040876 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-ovn\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040851 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-multus-socket-dir-parent\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.040901 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-run-multus-certs\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041058 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b3393ec4-cc72-499a-8557-ec6ca329a142-cni-binary-copy\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041090 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-hostroot\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041107 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-openvswitch\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041146 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-openvswitch\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041183 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/b3393ec4-cc72-499a-8557-ec6ca329a142-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041194 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-hostroot\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041235 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-var-lib-cni-multus\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041254 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-var-lib-kubelet\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041273 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-kubelet\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041306 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-cnibin\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041329 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-log-socket\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041340 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-var-lib-kubelet\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041349 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-slash\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041353 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-env-overrides\") pod \"ovnkube-node-fdhhj\" (UID: 
\"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041368 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041380 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-kubelet\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041377 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-host-var-lib-cni-multus\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041411 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041414 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-cnibin\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041435 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-log-socket\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041389 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-ovnkube-config\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041502 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-os-release\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041418 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-slash\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc 
kubenswrapper[5037]: I1126 14:15:56.041522 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb-mcd-auth-proxy-config\") pod \"machine-config-daemon-8jk2d\" (UID: \"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\") " pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041541 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/490e7d88-ae7f-45f9-ab12-598c33e3bc69-cni-binary-copy\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041557 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-ovnkube-script-lib\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041595 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b3393ec4-cc72-499a-8557-ec6ca329a142-os-release\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041625 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb-rootfs\") pod \"machine-config-daemon-8jk2d\" (UID: \"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\") " pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041600 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb-rootfs\") pod \"machine-config-daemon-8jk2d\" (UID: \"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\") " pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041595 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-os-release\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041667 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b3393ec4-cc72-499a-8557-ec6ca329a142-tuning-conf-dir\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041710 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-var-lib-openvswitch\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 
14:15:56.041729 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-node-log\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041794 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-var-lib-openvswitch\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041864 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/454ee6da-70e5-4d30-89e5-19a35123a278-ovn-node-metrics-cert\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041892 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-node-log\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041897 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-system-cni-dir\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041926 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-multus-cni-dir\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041968 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-run-ovn-kubernetes\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.041997 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lw2l\" (UniqueName: \"kubernetes.io/projected/b3393ec4-cc72-499a-8557-ec6ca329a142-kube-api-access-2lw2l\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.042003 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-system-cni-dir\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.042058 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" 
(UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-run-ovn-kubernetes\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.042059 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/b3393ec4-cc72-499a-8557-ec6ca329a142-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.042087 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/490e7d88-ae7f-45f9-ab12-598c33e3bc69-multus-cni-dir\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.042130 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-ovnkube-config\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.042362 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b3393ec4-cc72-499a-8557-ec6ca329a142-tuning-conf-dir\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.042388 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-ovnkube-script-lib\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.042390 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb-mcd-auth-proxy-config\") pod \"machine-config-daemon-8jk2d\" (UID: \"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\") " pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.042506 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/490e7d88-ae7f-45f9-ab12-598c33e3bc69-cni-binary-copy\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.042695 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b3393ec4-cc72-499a-8557-ec6ca329a142-cni-binary-copy\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.042777 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: 
\"kubernetes.io/configmap/490e7d88-ae7f-45f9-ab12-598c33e3bc69-multus-daemon-config\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.047138 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/454ee6da-70e5-4d30-89e5-19a35123a278-ovn-node-metrics-cert\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.053222 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.058793 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhgm2\" (UniqueName: \"kubernetes.io/projected/454ee6da-70e5-4d30-89e5-19a35123a278-kube-api-access-mhgm2\") pod \"ovnkube-node-fdhhj\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.064031 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sbcs\" (UniqueName: \"kubernetes.io/projected/490e7d88-ae7f-45f9-ab12-598c33e3bc69-kube-api-access-5sbcs\") pod \"multus-lxpjp\" (UID: \"490e7d88-ae7f-45f9-ab12-598c33e3bc69\") " pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.065223 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9v8cx\" (UniqueName: \"kubernetes.io/projected/8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb-kube-api-access-9v8cx\") pod \"machine-config-daemon-8jk2d\" (UID: \"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\") " pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.065585 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lw2l\" (UniqueName: \"kubernetes.io/projected/b3393ec4-cc72-499a-8557-ec6ca329a142-kube-api-access-2lw2l\") pod \"multus-additional-cni-plugins-hn6x5\" (UID: \"b3393ec4-cc72-499a-8557-ec6ca329a142\") " pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.067578 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins 
bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\
\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.082676 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11
\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.096959 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.099636 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 
secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.110317 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-lxpjp" Nov 26 14:15:56 crc kubenswrapper[5037]: W1126 14:15:56.113208 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3393ec4_cc72_499a_8557_ec6ca329a142.slice/crio-c6533f7c7489fb51580315fd62cd6418faf7e2eca4951222adc1ff197707207f WatchSource:0}: Error finding container c6533f7c7489fb51580315fd62cd6418faf7e2eca4951222adc1ff197707207f: Status 404 returned error can't find the container with id c6533f7c7489fb51580315fd62cd6418faf7e2eca4951222adc1ff197707207f Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.115101 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.120247 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.134723 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" event={"ID":"b3393ec4-cc72-499a-8557-ec6ca329a142","Type":"ContainerStarted","Data":"c6533f7c7489fb51580315fd62cd6418faf7e2eca4951222adc1ff197707207f"} Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.138781 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.142736 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-8tjq6" event={"ID":"8f4a637d-4b3f-4289-a84c-cd2559430a0e","Type":"ContainerStarted","Data":"ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3"} Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.142819 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-8tjq6" 
event={"ID":"8f4a637d-4b3f-4289-a84c-cd2559430a0e","Type":"ContainerStarted","Data":"b1968aaf92d40c9e24ce231e423600800214b27435637414f1bfbdb8c3695b64"} Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.143134 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.143684 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.148576 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.166220 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.188994 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.204864 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 
secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.226483 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.244456 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.260348 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.281428 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: 
I1126 14:15:56.297808 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.310096 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.326883 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.341338 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1
220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.355360 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.370104 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.387629 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.405038 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:56Z is after 
2025-08-24T17:21:41Z" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.834361 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 26 14:15:56 crc kubenswrapper[5037]: I1126 14:15:56.845185 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb-proxy-tls\") pod \"machine-config-daemon-8jk2d\" (UID: \"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\") " pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.001902 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:15:57 crc kubenswrapper[5037]: W1126 14:15:57.014699 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8bbdf8d8_f2ed_4b76_929a_a1a6c07e85fb.slice/crio-7eea6751dfee7ee41b6b900c6fa3b9a80633c614cec5b531b958267acdfb6024 WatchSource:0}: Error finding container 7eea6751dfee7ee41b6b900c6fa3b9a80633c614cec5b531b958267acdfb6024: Status 404 returned error can't find the container with id 7eea6751dfee7ee41b6b900c6fa3b9a80633c614cec5b531b958267acdfb6024 Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.153751 5037 generic.go:334] "Generic (PLEG): container finished" podID="454ee6da-70e5-4d30-89e5-19a35123a278" containerID="fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58" exitCode=0 Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.153870 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58"} Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.153964 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerStarted","Data":"81c9fc75923a803479b6e97e591997f55110abcaac1efa759598b5d9a4677f84"} Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.157116 5037 generic.go:334] "Generic (PLEG): container finished" podID="b3393ec4-cc72-499a-8557-ec6ca329a142" containerID="2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301" exitCode=0 Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.157217 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" event={"ID":"b3393ec4-cc72-499a-8557-ec6ca329a142","Type":"ContainerDied","Data":"2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301"} Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.160895 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"7eea6751dfee7ee41b6b900c6fa3b9a80633c614cec5b531b958267acdfb6024"} Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.164554 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-lxpjp" event={"ID":"490e7d88-ae7f-45f9-ab12-598c33e3bc69","Type":"ContainerStarted","Data":"a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2"} Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.164624 5037 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-multus/multus-lxpjp" event={"ID":"490e7d88-ae7f-45f9-ab12-598c33e3bc69","Type":"ContainerStarted","Data":"8771157a6ecf966f8cf5e82047037a9ab0e4c8919efec6c87639d672c00c23c4"} Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.169335 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2"} Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.169776 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.184400 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.198391 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.212968 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.230711 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.247941 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.261631 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.285527 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.305936 5037 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.312221 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-7bxxg"] Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.312717 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-7bxxg" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.315334 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.315508 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.315837 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.317249 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.322489 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.350425 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/ru
n/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mount
Path\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.367342 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.381769 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.400745 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z 
is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.416180 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.431957 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.447765 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.455046 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/cd349c4b-e265-4484-ab92-b4328ebde7fc-serviceca\") pod \"node-ca-7bxxg\" (UID: \"cd349c4b-e265-4484-ab92-b4328ebde7fc\") " pod="openshift-image-registry/node-ca-7bxxg" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.455100 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cd349c4b-e265-4484-ab92-b4328ebde7fc-host\") pod \"node-ca-7bxxg\" (UID: \"cd349c4b-e265-4484-ab92-b4328ebde7fc\") " pod="openshift-image-registry/node-ca-7bxxg" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.455167 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrqz9\" (UniqueName: \"kubernetes.io/projected/cd349c4b-e265-4484-ab92-b4328ebde7fc-kube-api-access-wrqz9\") pod \"node-ca-7bxxg\" (UID: \"cd349c4b-e265-4484-ab92-b4328ebde7fc\") " pod="openshift-image-registry/node-ca-7bxxg" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.461443 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.475469 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.487985 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.502110 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.516983 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.537568 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.553731 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.556302 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrqz9\" (UniqueName: \"kubernetes.io/projected/cd349c4b-e265-4484-ab92-b4328ebde7fc-kube-api-access-wrqz9\") pod \"node-ca-7bxxg\" (UID: \"cd349c4b-e265-4484-ab92-b4328ebde7fc\") " pod="openshift-image-registry/node-ca-7bxxg" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.556372 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/cd349c4b-e265-4484-ab92-b4328ebde7fc-serviceca\") pod \"node-ca-7bxxg\" (UID: \"cd349c4b-e265-4484-ab92-b4328ebde7fc\") " pod="openshift-image-registry/node-ca-7bxxg" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.556395 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cd349c4b-e265-4484-ab92-b4328ebde7fc-host\") pod \"node-ca-7bxxg\" (UID: \"cd349c4b-e265-4484-ab92-b4328ebde7fc\") " pod="openshift-image-registry/node-ca-7bxxg" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.556462 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cd349c4b-e265-4484-ab92-b4328ebde7fc-host\") pod \"node-ca-7bxxg\" (UID: \"cd349c4b-e265-4484-ab92-b4328ebde7fc\") " pod="openshift-image-registry/node-ca-7bxxg" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.559184 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/cd349c4b-e265-4484-ab92-b4328ebde7fc-serviceca\") pod \"node-ca-7bxxg\" (UID: \"cd349c4b-e265-4484-ab92-b4328ebde7fc\") " pod="openshift-image-registry/node-ca-7bxxg" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.572112 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.576743 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrqz9\" (UniqueName: \"kubernetes.io/projected/cd349c4b-e265-4484-ab92-b4328ebde7fc-kube-api-access-wrqz9\") pod \"node-ca-7bxxg\" (UID: \"cd349c4b-e265-4484-ab92-b4328ebde7fc\") " pod="openshift-image-registry/node-ca-7bxxg" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.589450 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc 
kubenswrapper[5037]: I1126 14:15:57.603803 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:57Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.626785 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-7bxxg" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.657051 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.657171 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.657215 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.657237 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.657258 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.657424 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.657447 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.657459 5037 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.657511 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:01.657496103 +0000 UTC m=+28.454266287 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.657564 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.657577 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.657584 5037 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.657604 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:01.657597525 +0000 UTC m=+28.454367709 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.657637 5037 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.657658 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:01.657650766 +0000 UTC m=+28.454420950 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.657721 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:16:01.657715878 +0000 UTC m=+28.454486062 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.657744 5037 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.657882 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:01.657834041 +0000 UTC m=+28.454604225 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.907472 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.907531 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:15:57 crc kubenswrapper[5037]: I1126 14:15:57.907544 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.907631 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.907720 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:15:57 crc kubenswrapper[5037]: E1126 14:15:57.907846 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.182957 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerStarted","Data":"d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c"} Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.183442 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerStarted","Data":"86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897"} Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.183547 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerStarted","Data":"306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68"} Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.183640 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerStarted","Data":"31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4"} Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.183768 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerStarted","Data":"bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4"} Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.183864 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerStarted","Data":"feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4"} Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.186081 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4"} Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.186141 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" 
event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713"} Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.187590 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-7bxxg" event={"ID":"cd349c4b-e265-4484-ab92-b4328ebde7fc","Type":"ContainerStarted","Data":"ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f"} Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.187735 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-7bxxg" event={"ID":"cd349c4b-e265-4484-ab92-b4328ebde7fc","Type":"ContainerStarted","Data":"3935294d6114cda0ee642d8b87c17b1a237cad51b17455579d5410f836998acd"} Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.189996 5037 generic.go:334] "Generic (PLEG): container finished" podID="b3393ec4-cc72-499a-8557-ec6ca329a142" containerID="ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4" exitCode=0 Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.190065 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" event={"ID":"b3393ec4-cc72-499a-8557-ec6ca329a142","Type":"ContainerDied","Data":"ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4"} Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.201509 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 
secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.220024 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.235665 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.251308 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.277566 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z 
is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.301474 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.314409 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.328507 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.342202 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.353883 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\
\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.369572 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.387349 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc 
kubenswrapper[5037]: I1126 14:15:58.405306 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.416694 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.429648 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.442058 
5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.454160 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.465691 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\
\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.477896 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 
secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.489702 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.502357 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.515257 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.535675 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z 
is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.552577 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.570404 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.582797 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.596919 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:58 crc kubenswrapper[5037]: I1126 14:15:58.615414 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:58Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.196013 5037 generic.go:334] "Generic (PLEG): container finished" podID="b3393ec4-cc72-499a-8557-ec6ca329a142" containerID="bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082" exitCode=0 Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.196074 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" event={"ID":"b3393ec4-cc72-499a-8557-ec6ca329a142","Type":"ContainerDied","Data":"bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082"} Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.213047 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"
readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.231736 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.246150 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.264850 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.280314 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.297526 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.311172 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.325264 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.339657 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"
/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.356555 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.374527 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z 
is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.387084 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.398846 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.411317 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.799829 5037 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.802793 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.802873 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.802893 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.803174 5037 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.812421 5037 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.812951 5037 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.814623 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.814680 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.814702 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.814726 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.814746 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:15:59Z","lastTransitionTime":"2025-11-26T14:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:15:59 crc kubenswrapper[5037]: E1126 14:15:59.837844 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.843000 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.843055 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.843071 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.843094 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.843109 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:15:59Z","lastTransitionTime":"2025-11-26T14:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:15:59 crc kubenswrapper[5037]: E1126 14:15:59.858341 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.863081 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.863136 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.863149 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.863174 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.863188 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:15:59Z","lastTransitionTime":"2025-11-26T14:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:15:59 crc kubenswrapper[5037]: E1126 14:15:59.881520 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.885485 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.885523 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.885533 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.885549 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.885560 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:15:59Z","lastTransitionTime":"2025-11-26T14:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:15:59 crc kubenswrapper[5037]: E1126 14:15:59.905828 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.907971 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.908087 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:15:59 crc kubenswrapper[5037]: E1126 14:15:59.908146 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.908252 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:15:59 crc kubenswrapper[5037]: E1126 14:15:59.908390 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:15:59 crc kubenswrapper[5037]: E1126 14:15:59.908477 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.928445 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.928512 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.928532 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.928560 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.928583 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:15:59Z","lastTransitionTime":"2025-11-26T14:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:15:59 crc kubenswrapper[5037]: E1126 14:15:59.945407 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:15:59Z is after 2025-08-24T17:21:41Z" Nov 26 14:15:59 crc kubenswrapper[5037]: E1126 14:15:59.945639 5037 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.952549 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.952635 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.952682 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.952730 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:15:59 crc kubenswrapper[5037]: I1126 14:15:59.952760 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:15:59Z","lastTransitionTime":"2025-11-26T14:15:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.056136 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.056643 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.056656 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.056675 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.056713 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:00Z","lastTransitionTime":"2025-11-26T14:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.160210 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.160249 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.160263 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.160304 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.160321 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:00Z","lastTransitionTime":"2025-11-26T14:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.202195 5037 generic.go:334] "Generic (PLEG): container finished" podID="b3393ec4-cc72-499a-8557-ec6ca329a142" containerID="778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b" exitCode=0 Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.202308 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" event={"ID":"b3393ec4-cc72-499a-8557-ec6ca329a142","Type":"ContainerDied","Data":"778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b"} Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.207797 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerStarted","Data":"ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2"} Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.230043 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\
\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.249442 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.263454 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.263567 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.263584 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.263608 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.263644 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:00Z","lastTransitionTime":"2025-11-26T14:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.267812 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.280110 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.297080 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-releas
e\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.314809 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mount
Path\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.333068 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 
secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.350101 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.368851 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.368903 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.368922 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.368947 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.368965 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:00Z","lastTransitionTime":"2025-11-26T14:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.369208 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.390972 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.422382 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.440899 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.457402 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.470188 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:00Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.471824 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.471861 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.471872 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.471890 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.471902 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:00Z","lastTransitionTime":"2025-11-26T14:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.576262 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.576336 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.576356 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.576379 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.576392 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:00Z","lastTransitionTime":"2025-11-26T14:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.679119 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.679174 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.679187 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.679208 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.679221 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:00Z","lastTransitionTime":"2025-11-26T14:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.786569 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.786628 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.786639 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.786659 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.786674 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:00Z","lastTransitionTime":"2025-11-26T14:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.889988 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.890049 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.890066 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.890092 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.890109 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:00Z","lastTransitionTime":"2025-11-26T14:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.992984 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.993044 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.993058 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.993077 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:00 crc kubenswrapper[5037]: I1126 14:16:00.993090 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:00Z","lastTransitionTime":"2025-11-26T14:16:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.096352 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.096409 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.096422 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.096442 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.096458 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:01Z","lastTransitionTime":"2025-11-26T14:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.200253 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.200801 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.200816 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.200839 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.200851 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:01Z","lastTransitionTime":"2025-11-26T14:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.215116 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" event={"ID":"b3393ec4-cc72-499a-8557-ec6ca329a142","Type":"ContainerStarted","Data":"7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c"} Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.238396 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha2
56:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.256384 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.273740 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.288357 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.304145 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.304202 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.304221 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.304246 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.304260 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:01Z","lastTransitionTime":"2025-11-26T14:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.309086 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z 
is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.325802 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.340522 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.353198 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.367056 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.381645 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.398177 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.406196 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.406252 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.406270 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.406330 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.406353 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:01Z","lastTransitionTime":"2025-11-26T14:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.411005 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.427051 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.444383 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"na
me\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:01Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.508595 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.508655 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.508668 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.508689 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.508709 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:01Z","lastTransitionTime":"2025-11-26T14:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.612478 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.612525 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.612534 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.612554 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.612565 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:01Z","lastTransitionTime":"2025-11-26T14:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.706063 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.706232 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.706262 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.706306 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.706327 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.706448 5037 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object 
"openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.706508 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:09.706490101 +0000 UTC m=+36.503260285 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.706525 5037 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.706583 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.706624 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.706642 5037 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.706649 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:16:09.706618904 +0000 UTC m=+36.503389108 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.706679 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:09.706668555 +0000 UTC m=+36.503438749 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.706700 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:09.706693055 +0000 UTC m=+36.503463249 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.706735 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.706755 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.706768 5037 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.706803 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:09.706790728 +0000 UTC m=+36.503560922 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.714722 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.714780 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.714791 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.714828 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.714840 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:01Z","lastTransitionTime":"2025-11-26T14:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.817437 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.817490 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.817515 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.817538 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.817553 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:01Z","lastTransitionTime":"2025-11-26T14:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.907869 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.908011 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.907891 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.908080 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.907869 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 14:16:01 crc kubenswrapper[5037]: E1126 14:16:01.908125 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.919590 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.919623 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.919631 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.919643 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:01 crc kubenswrapper[5037]: I1126 14:16:01.919653 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:01Z","lastTransitionTime":"2025-11-26T14:16:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.022223 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.022262 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.022272 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.022303 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.022313 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:02Z","lastTransitionTime":"2025-11-26T14:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.124600 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.124654 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.124665 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.124679 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.124690 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:02Z","lastTransitionTime":"2025-11-26T14:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.221702 5037 generic.go:334] "Generic (PLEG): container finished" podID="b3393ec4-cc72-499a-8557-ec6ca329a142" containerID="7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c" exitCode=0
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.221762 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" event={"ID":"b3393ec4-cc72-499a-8557-ec6ca329a142","Type":"ContainerDied","Data":"7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c"}
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.227203 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.227253 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.227268 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.227313 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.227340 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:02Z","lastTransitionTime":"2025-11-26T14:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.237420 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.253189 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.269931 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.286171 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.305383 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.325456 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.331419 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.331477 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.331489 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.331507 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.331519 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:02Z","lastTransitionTime":"2025-11-26T14:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.342651 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 
secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.357115 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.371345 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.385142 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.408379 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z 
is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.425029 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.434182 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.434208 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.434216 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.434231 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.434241 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:02Z","lastTransitionTime":"2025-11-26T14:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.437225 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.450229 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.537657 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.537725 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.537740 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.537763 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.537777 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:02Z","lastTransitionTime":"2025-11-26T14:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.640520 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.640576 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.640590 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.640616 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.640633 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:02Z","lastTransitionTime":"2025-11-26T14:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.743654 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.743723 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.743746 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.743780 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.743799 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:02Z","lastTransitionTime":"2025-11-26T14:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.847849 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.847899 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.847912 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.847931 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.847944 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:02Z","lastTransitionTime":"2025-11-26T14:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.951073 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.951129 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.951141 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.951159 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:02 crc kubenswrapper[5037]: I1126 14:16:02.951172 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:02Z","lastTransitionTime":"2025-11-26T14:16:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.053511 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.053555 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.053564 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.053580 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.053591 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:03Z","lastTransitionTime":"2025-11-26T14:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.156729 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.157150 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.157163 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.157186 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.157199 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:03Z","lastTransitionTime":"2025-11-26T14:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.235498 5037 generic.go:334] "Generic (PLEG): container finished" podID="b3393ec4-cc72-499a-8557-ec6ca329a142" containerID="953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c" exitCode=0 Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.235576 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" event={"ID":"b3393ec4-cc72-499a-8557-ec6ca329a142","Type":"ContainerDied","Data":"953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c"} Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.248610 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerStarted","Data":"374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060"} Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.248982 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.249018 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.251846 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.259733 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.259761 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.259773 5037 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.259792 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.259805 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:03Z","lastTransitionTime":"2025-11-26T14:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.269693 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.277811 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.278644 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.289264 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.302064 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.315409 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.320459 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z 
is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.335006 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.341334 5037 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.347415 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.358221 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.362052 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.362094 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.362103 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.362120 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.362134 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:03Z","lastTransitionTime":"2025-11-26T14:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.371106 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.390851 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.401333 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.412854 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.427373 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.440919 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.454068 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\
\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.465150 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.465236 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.465251 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.465273 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.465307 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:03Z","lastTransitionTime":"2025-11-26T14:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.467463 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.480567 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.492018 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 
2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.502922 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.517177 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de
57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.529594 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.541703 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.560451 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.568209 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.568255 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.568269 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:03 crc 
kubenswrapper[5037]: I1126 14:16:03.568313 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.568326 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:03Z","lastTransitionTime":"2025-11-26T14:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.574508 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 
14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.587793 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.599375 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.611884 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.624101 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.671331 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.671380 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.671392 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.671411 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.671423 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:03Z","lastTransitionTime":"2025-11-26T14:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.774426 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.774481 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.774492 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.774510 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.774521 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:03Z","lastTransitionTime":"2025-11-26T14:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.877468 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.877521 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.877534 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.877556 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.877569 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:03Z","lastTransitionTime":"2025-11-26T14:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.907950 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.908014 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:03 crc kubenswrapper[5037]: E1126 14:16:03.908151 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.908221 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:03 crc kubenswrapper[5037]: E1126 14:16:03.908319 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:03 crc kubenswrapper[5037]: E1126 14:16:03.908842 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.924418 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}}
,\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.940542 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.959073 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.977775 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.980384 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.980473 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.980491 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.980516 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.980534 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:03Z","lastTransitionTime":"2025-11-26T14:16:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:03 crc kubenswrapper[5037]: I1126 14:16:03.996620 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14
c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\
\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:03Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.007909 5037 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.025529 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc27
6e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.039580 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.053717 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.069521 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.083012 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.083059 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.083072 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.083089 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.083100 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:04Z","lastTransitionTime":"2025-11-26T14:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.091670 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://374f3356b5dd72f14cd7dceb298e27429ba58d7f
ae836a8c08c06501d2694060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.109127 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.119866 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.132716 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.186222 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.186326 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.186345 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.186366 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.186380 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:04Z","lastTransitionTime":"2025-11-26T14:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.265470 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" event={"ID":"b3393ec4-cc72-499a-8557-ec6ca329a142","Type":"ContainerStarted","Data":"c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc"} Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.283953 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.289537 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.289590 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.289601 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.289622 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:04 crc 
kubenswrapper[5037]: I1126 14:16:04.289634 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:04Z","lastTransitionTime":"2025-11-26T14:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.301536 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.318200 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.332807 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.347202 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.361545 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.379836 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.393198 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.393245 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.393258 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.393296 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.393313 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:04Z","lastTransitionTime":"2025-11-26T14:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.399323 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.415947 5037 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.447320 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics
-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"na
me\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.478767 5037 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.496364 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.496409 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.496420 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.496439 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.496451 5037 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:04Z","lastTransitionTime":"2025-11-26T14:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.503517 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.519519 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.532326 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:04Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.600713 5037 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.600791 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.600809 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.601023 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.601053 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:04Z","lastTransitionTime":"2025-11-26T14:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.704115 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.704154 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.704164 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.704183 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.704194 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:04Z","lastTransitionTime":"2025-11-26T14:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.806416 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.806460 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.806472 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.806491 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.806505 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:04Z","lastTransitionTime":"2025-11-26T14:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.910085 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.910412 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.910424 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.910445 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:04 crc kubenswrapper[5037]: I1126 14:16:04.910458 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:04Z","lastTransitionTime":"2025-11-26T14:16:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.013483 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.013573 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.013597 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.013631 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.013652 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:05Z","lastTransitionTime":"2025-11-26T14:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.117276 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.117377 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.117395 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.117424 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.117444 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:05Z","lastTransitionTime":"2025-11-26T14:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.219793 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.219837 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.219852 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.219873 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.219888 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:05Z","lastTransitionTime":"2025-11-26T14:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.322163 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.322215 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.322226 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.322247 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.322261 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:05Z","lastTransitionTime":"2025-11-26T14:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.425236 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.425276 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.425302 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.425323 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.425335 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:05Z","lastTransitionTime":"2025-11-26T14:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.527723 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.527771 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.527781 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.527800 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.527815 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:05Z","lastTransitionTime":"2025-11-26T14:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.630831 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.630871 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.630882 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.630900 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.630912 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:05Z","lastTransitionTime":"2025-11-26T14:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.733889 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.733935 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.733948 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.733966 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.733978 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:05Z","lastTransitionTime":"2025-11-26T14:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.837120 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.837164 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.837172 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.837188 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.837202 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:05Z","lastTransitionTime":"2025-11-26T14:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.908116 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.908215 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.908132 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:05 crc kubenswrapper[5037]: E1126 14:16:05.908308 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:05 crc kubenswrapper[5037]: E1126 14:16:05.908424 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:05 crc kubenswrapper[5037]: E1126 14:16:05.908517 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.940821 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.940867 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.940881 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.940898 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:05 crc kubenswrapper[5037]: I1126 14:16:05.940912 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:05Z","lastTransitionTime":"2025-11-26T14:16:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.043877 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.043936 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.043946 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.043964 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.043979 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:06Z","lastTransitionTime":"2025-11-26T14:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.146652 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.146713 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.146727 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.146752 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.146772 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:06Z","lastTransitionTime":"2025-11-26T14:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.249353 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.249405 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.249416 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.249434 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.249445 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:06Z","lastTransitionTime":"2025-11-26T14:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.275431 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/0.log" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.280423 5037 generic.go:334] "Generic (PLEG): container finished" podID="454ee6da-70e5-4d30-89e5-19a35123a278" containerID="374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060" exitCode=1 Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.280496 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060"} Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.281254 5037 scope.go:117] "RemoveContainer" containerID="374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.312269 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.313167 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 
secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.326392 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.344571 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.355896 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.355935 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.355945 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.355960 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.355973 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:06Z","lastTransitionTime":"2025-11-26T14:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.362273 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.386684 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:05Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:05.281339 6327 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:05.281362 6327 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 14:16:05.281384 6327 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 14:16:05.281392 6327 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:05.281411 6327 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 14:16:05.281419 6327 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 14:16:05.281442 6327 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:05.281451 6327 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 14:16:05.281455 6327 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 14:16:05.281458 6327 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 14:16:05.281471 6327 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:05.281494 6327 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 14:16:05.281515 6327 factory.go:656] Stopping watch factory\\\\nI1126 14:16:05.281534 6327 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:05.281532 6327 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.405035 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.420765 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.435955 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.449734 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.462454 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.462502 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.462514 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.462536 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.462552 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:06Z","lastTransitionTime":"2025-11-26T14:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.474181 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.488109 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.504160 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.518139 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\
\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.532227 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b
154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.547381 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.561930 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\
\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.566713 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.566764 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.566773 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.566790 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.566803 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:06Z","lastTransitionTime":"2025-11-26T14:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.579307 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.595233 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.609972 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.626256 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 
2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.640892 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static
-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.655852 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.666911 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.668786 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.668817 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.668829 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.668847 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.668859 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:06Z","lastTransitionTime":"2025-11-26T14:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.678988 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.703177 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:05Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:05.281339 6327 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:05.281362 6327 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 14:16:05.281384 6327 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 14:16:05.281392 6327 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:05.281411 6327 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 14:16:05.281419 6327 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 14:16:05.281442 6327 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:05.281451 6327 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 14:16:05.281455 6327 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 14:16:05.281458 6327 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 14:16:05.281471 6327 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:05.281494 6327 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 14:16:05.281515 6327 factory.go:656] Stopping watch factory\\\\nI1126 14:16:05.281534 6327 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:05.281532 6327 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.719550 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.737205 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.760084 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:06Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.772882 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.772922 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.772934 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.772955 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.772968 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:06Z","lastTransitionTime":"2025-11-26T14:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.875505 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.875538 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.875550 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.875569 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.875580 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:06Z","lastTransitionTime":"2025-11-26T14:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.978362 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.978422 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.978431 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.978449 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:06 crc kubenswrapper[5037]: I1126 14:16:06.978461 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:06Z","lastTransitionTime":"2025-11-26T14:16:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.080971 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.081024 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.081032 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.081051 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.081061 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:07Z","lastTransitionTime":"2025-11-26T14:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.183915 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.183969 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.183983 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.184000 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.184011 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:07Z","lastTransitionTime":"2025-11-26T14:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.285631 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.285667 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.285678 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.285693 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.285707 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:07Z","lastTransitionTime":"2025-11-26T14:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.286690 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/1.log" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.287229 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/0.log" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.290854 5037 generic.go:334] "Generic (PLEG): container finished" podID="454ee6da-70e5-4d30-89e5-19a35123a278" containerID="929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e" exitCode=1 Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.290904 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e"} Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.290980 5037 scope.go:117] "RemoveContainer" containerID="374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.292061 5037 scope.go:117] "RemoveContainer" containerID="929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e" Nov 26 14:16:07 crc kubenswrapper[5037]: E1126 14:16:07.292344 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.319429 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.331013 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.342469 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.356278 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.369325 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.381397 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.388298 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.388339 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.388349 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.388366 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.388377 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:07Z","lastTransitionTime":"2025-11-26T14:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.395929 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.410799 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.426059 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\
\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.440167 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d746
2\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI 
cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.452447 5037 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.462775 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.472459 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.489747 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:05Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:05.281339 6327 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:05.281362 6327 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 14:16:05.281384 6327 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 14:16:05.281392 6327 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:05.281411 6327 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 14:16:05.281419 6327 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 14:16:05.281442 6327 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:05.281451 6327 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 14:16:05.281455 6327 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 14:16:05.281458 6327 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 14:16:05.281471 6327 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:05.281494 6327 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 14:16:05.281515 6327 factory.go:656] Stopping watch factory\\\\nI1126 14:16:05.281534 6327 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:05.281532 6327 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:07Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.168\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 14:16:07.186947 6506 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.187053 6506 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1126 14:16:07.187061 6506 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1126 14:16:07.187067 6506 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.186795 6506 services_controller.go:451] Built service default/kubernetes cluster-wide LB for network=default: []services.LB{}\\\\nF1126 14:16:07.187078 6506 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62
c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:07Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.491115 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.491162 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.491179 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.491202 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 
14:16:07.491222 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:07Z","lastTransitionTime":"2025-11-26T14:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.593889 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.594128 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.594230 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.594323 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.594392 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:07Z","lastTransitionTime":"2025-11-26T14:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.697589 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.698138 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.698488 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.698667 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.698932 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:07Z","lastTransitionTime":"2025-11-26T14:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.801487 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.801543 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.801556 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.801576 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.801589 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:07Z","lastTransitionTime":"2025-11-26T14:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.904268 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.904325 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.904335 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.904353 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.904365 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:07Z","lastTransitionTime":"2025-11-26T14:16:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.908247 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.908306 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 14:16:07 crc kubenswrapper[5037]: I1126 14:16:07.908244 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 14:16:07 crc kubenswrapper[5037]: E1126 14:16:07.908399 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 14:16:07 crc kubenswrapper[5037]: E1126 14:16:07.908478 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 14:16:07 crc kubenswrapper[5037]: E1126 14:16:07.908677 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.007633 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.007675 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.007684 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.007702 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.007713 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:08Z","lastTransitionTime":"2025-11-26T14:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.111531 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.111597 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.111612 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.111636 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.111652 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:08Z","lastTransitionTime":"2025-11-26T14:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.151183 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw"] Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.151704 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.154338 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.156575 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.168154 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.179319 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.190966 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.203417 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.214262 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.214327 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.214339 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.214358 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.214374 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:08Z","lastTransitionTime":"2025-11-26T14:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.217865 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.219317 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4e677a13-ab89-4820-868f-ad848e66e4b0-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cdzgw\" (UID: \"4e677a13-ab89-4820-868f-ad848e66e4b0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.219351 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4e677a13-ab89-4820-868f-ad848e66e4b0-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cdzgw\" (UID: \"4e677a13-ab89-4820-868f-ad848e66e4b0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.219374 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9kgj\" (UniqueName: 
\"kubernetes.io/projected/4e677a13-ab89-4820-868f-ad848e66e4b0-kube-api-access-j9kgj\") pod \"ovnkube-control-plane-749d76644c-cdzgw\" (UID: \"4e677a13-ab89-4820-868f-ad848e66e4b0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.219418 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4e677a13-ab89-4820-868f-ad848e66e4b0-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cdzgw\" (UID: \"4e677a13-ab89-4820-868f-ad848e66e4b0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.231999 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.247653 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.260700 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.280331 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.296674 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/1.log" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.301050 5037 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.316625 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.317464 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.317498 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.317511 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.317530 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.317543 5037 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:08Z","lastTransitionTime":"2025-11-26T14:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.319863 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4e677a13-ab89-4820-868f-ad848e66e4b0-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cdzgw\" (UID: \"4e677a13-ab89-4820-868f-ad848e66e4b0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.319915 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4e677a13-ab89-4820-868f-ad848e66e4b0-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cdzgw\" (UID: \"4e677a13-ab89-4820-868f-ad848e66e4b0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.319934 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4e677a13-ab89-4820-868f-ad848e66e4b0-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cdzgw\" (UID: \"4e677a13-ab89-4820-868f-ad848e66e4b0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.319952 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9kgj\" (UniqueName: \"kubernetes.io/projected/4e677a13-ab89-4820-868f-ad848e66e4b0-kube-api-access-j9kgj\") pod \"ovnkube-control-plane-749d76644c-cdzgw\" (UID: \"4e677a13-ab89-4820-868f-ad848e66e4b0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.321088 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4e677a13-ab89-4820-868f-ad848e66e4b0-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cdzgw\" (UID: \"4e677a13-ab89-4820-868f-ad848e66e4b0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.321696 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4e677a13-ab89-4820-868f-ad848e66e4b0-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cdzgw\" (UID: \"4e677a13-ab89-4820-868f-ad848e66e4b0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.330011 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4e677a13-ab89-4820-868f-ad848e66e4b0-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cdzgw\" (UID: \"4e677a13-ab89-4820-868f-ad848e66e4b0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.335249 5037 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.338034 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9kgj\" (UniqueName: \"kubernetes.io/projected/4e677a13-ab89-4820-868f-ad848e66e4b0-kube-api-access-j9kgj\") pod \"ovnkube-control-plane-749d76644c-cdzgw\" (UID: \"4e677a13-ab89-4820-868f-ad848e66e4b0\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.349153 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.361869 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.384794 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:05Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:05.281339 6327 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:05.281362 6327 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 14:16:05.281384 6327 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 14:16:05.281392 6327 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:05.281411 6327 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 14:16:05.281419 6327 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 14:16:05.281442 6327 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:05.281451 6327 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 14:16:05.281455 6327 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 14:16:05.281458 6327 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 14:16:05.281471 6327 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:05.281494 6327 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 14:16:05.281515 6327 factory.go:656] Stopping watch factory\\\\nI1126 14:16:05.281534 6327 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:05.281532 6327 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:07Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.168\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 14:16:07.186947 6506 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.187053 6506 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1126 14:16:07.187061 6506 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1126 14:16:07.187067 6506 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.186795 6506 services_controller.go:451] Built service default/kubernetes cluster-wide LB for network=default: []services.LB{}\\\\nF1126 14:16:07.187078 6506 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62
c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:08Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.419895 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.419934 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.419943 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.419960 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 
14:16:08.419970 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:08Z","lastTransitionTime":"2025-11-26T14:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.464836 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.523232 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.523265 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.523275 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.523303 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.523313 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:08Z","lastTransitionTime":"2025-11-26T14:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.626365 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.626671 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.626709 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.626730 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.626745 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:08Z","lastTransitionTime":"2025-11-26T14:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.728906 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.728951 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.728960 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.728976 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.728986 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:08Z","lastTransitionTime":"2025-11-26T14:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.832830 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.832868 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.832878 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.832895 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.832905 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:08Z","lastTransitionTime":"2025-11-26T14:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.935926 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.935976 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.935993 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.936010 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:08 crc kubenswrapper[5037]: I1126 14:16:08.936024 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:08Z","lastTransitionTime":"2025-11-26T14:16:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.038780 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.038840 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.038853 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.038870 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.038879 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:09Z","lastTransitionTime":"2025-11-26T14:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.141414 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.141478 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.141495 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.141518 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.141533 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:09Z","lastTransitionTime":"2025-11-26T14:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.244424 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.244474 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.244486 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.244502 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.244514 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:09Z","lastTransitionTime":"2025-11-26T14:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.252506 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-wjch9"] Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.253141 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.253226 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.267674 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-ident
ity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.280602 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.295977 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.306523 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" event={"ID":"4e677a13-ab89-4820-868f-ad848e66e4b0","Type":"ContainerStarted","Data":"ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223"} Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.306585 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" event={"ID":"4e677a13-ab89-4820-868f-ad848e66e4b0","Type":"ContainerStarted","Data":"966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4"} Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.306601 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" event={"ID":"4e677a13-ab89-4820-868f-ad848e66e4b0","Type":"ContainerStarted","Data":"a160cc2b9b16ab951bc5bc8c912024365a39e1a013f9a0e0f6660216e2028e39"} Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.314775 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.331217 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zp6hm\" (UniqueName: \"kubernetes.io/projected/b18a6f09-7a1e-4965-81e2-dde847147b41-kube-api-access-zp6hm\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.331316 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.331595 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-reg
eneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] 
issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.344943 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.349730 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.349767 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.349778 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.349794 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.349804 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:09Z","lastTransitionTime":"2025-11-26T14:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.359775 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.372213 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.393877 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:05Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:05.281339 6327 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:05.281362 6327 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 14:16:05.281384 6327 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 14:16:05.281392 6327 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:05.281411 6327 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 14:16:05.281419 6327 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 14:16:05.281442 6327 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:05.281451 6327 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 14:16:05.281455 6327 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 14:16:05.281458 6327 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 14:16:05.281471 6327 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:05.281494 6327 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 14:16:05.281515 6327 factory.go:656] Stopping watch factory\\\\nI1126 14:16:05.281534 6327 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:05.281532 6327 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:07Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.168\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 14:16:07.186947 6506 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.187053 6506 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1126 14:16:07.187061 6506 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1126 14:16:07.187067 6506 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.186795 6506 services_controller.go:451] Built service default/kubernetes cluster-wide LB for network=default: []services.LB{}\\\\nF1126 14:16:07.187078 6506 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62
c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.405532 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.414831 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.424879 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.432625 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.432708 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zp6hm\" (UniqueName: \"kubernetes.io/projected/b18a6f09-7a1e-4965-81e2-dde847147b41-kube-api-access-zp6hm\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.432951 5037 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 
14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.433025 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs podName:b18a6f09-7a1e-4965-81e2-dde847147b41 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:09.933004085 +0000 UTC m=+36.729774269 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs") pod "network-metrics-daemon-wjch9" (UID: "b18a6f09-7a1e-4965-81e2-dde847147b41") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.436977 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for 
pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.448280 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zp6hm\" (UniqueName: \"kubernetes.io/projected/b18a6f09-7a1e-4965-81e2-dde847147b41-kube-api-access-zp6hm\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.448628 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951
bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.452412 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.452435 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.452443 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.452461 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.452478 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:09Z","lastTransitionTime":"2025-11-26T14:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.462526 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.474392 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.484829 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.495169 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.505000 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.517069 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.528476 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.540318 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.551223 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.555577 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.555612 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.555624 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.555641 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.555653 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:09Z","lastTransitionTime":"2025-11-26T14:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.565015 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.577894 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.593485 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.609174 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.621659 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.634173 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.653487 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:05Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:05.281339 6327 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:05.281362 6327 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 14:16:05.281384 6327 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 14:16:05.281392 6327 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:05.281411 6327 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 14:16:05.281419 6327 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 14:16:05.281442 6327 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:05.281451 6327 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 14:16:05.281455 6327 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 14:16:05.281458 6327 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 14:16:05.281471 6327 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:05.281494 6327 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 14:16:05.281515 6327 factory.go:656] Stopping watch factory\\\\nI1126 14:16:05.281534 6327 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:05.281532 6327 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:07Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.168\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 14:16:07.186947 6506 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.187053 6506 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1126 14:16:07.187061 6506 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1126 14:16:07.187067 6506 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.186795 6506 services_controller.go:451] Built service default/kubernetes cluster-wide LB for network=default: []services.LB{}\\\\nF1126 14:16:07.187078 6506 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62
c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.658162 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.658217 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.658226 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.658240 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 
14:16:09.658250 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:09Z","lastTransitionTime":"2025-11-26T14:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.668319 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e277
53fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.680556 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:09Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.736278 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.736438 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:16:25.736404519 +0000 UTC m=+52.533174703 (durationBeforeRetry 16s). 
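Every status patch in this stretch fails for the same reason: the pod.network-node-identity.openshift.io webhook listening on https://127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-11-26T14:16:09Z. A minimal sketch of how to confirm the validity window from the node, assuming the third-party 'cryptography' package is installed and the endpoint is reachable (host and port copied from the failed Post above):

    import ssl
    from datetime import datetime, timezone
    from cryptography import x509

    HOST, PORT = "127.0.0.1", 9743  # webhook endpoint from the log

    # Fetch the serving certificate without verifying it (it is expected to be expired).
    pem = ssl.get_server_certificate((HOST, PORT))
    cert = x509.load_pem_x509_certificate(pem.encode())

    now = datetime.now(timezone.utc)
    not_after = cert.not_valid_after.replace(tzinfo=timezone.utc)
    print("subject:  ", cert.subject.rfc4514_string())
    print("notBefore:", cert.not_valid_before)
    print("notAfter: ", cert.not_valid_after)
    print("EXPIRED" if now > not_after else "still valid")

Until that certificate is rotated, every pod and node status patch routed through this webhook will keep failing with the same x509 error.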
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.736531 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.736580 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.736608 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.736637 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.736755 5037 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.736756 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.736782 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.736784 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.736798 5037 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:16:09 crc 
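The UnmountVolume.TearDown failure just above is not a storage error as such: the kubelet has restarted and the kubevirt.io.hostpath-provisioner driver has not yet re-registered, so there is no CSI client to hand the teardown to. Drivers announce themselves through sockets in the kubelet plugin registration directory; a minimal sketch for checking that from the node, assuming the default directory (it is configurable) and root access:

    import os

    REG_DIR = "/var/lib/kubelet/plugins_registry"  # default kubelet registration dir
    EXPECTED = "kubevirt.io.hostpath-provisioner"  # driver name from the error above

    entries = sorted(os.listdir(REG_DIR)) if os.path.isdir(REG_DIR) else []
    print(f"registration sockets in {REG_DIR}:")
    for name in entries:
        print("  ", name)
    if not any(EXPECTED in name for name in entries):
        print(f"{EXPECTED} is not registered; unmounts for its volumes will keep "
              f"backing off (16s here) until the driver pod comes back up")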
kubenswrapper[5037]: E1126 14:16:09.736808 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:25.736792689 +0000 UTC m=+52.533562873 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.736810 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.736828 5037 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.736837 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:25.73682739 +0000 UTC m=+52.533597574 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.736787 5037 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.736868 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:25.736856261 +0000 UTC m=+52.533626445 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.736886 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:25.736877541 +0000 UTC m=+52.533647725 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.760509 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.760547 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.760558 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.760578 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.760591 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:09Z","lastTransitionTime":"2025-11-26T14:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.863253 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.863325 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.863338 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.863358 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.863372 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:09Z","lastTransitionTime":"2025-11-26T14:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.907697 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.907795 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.907860 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
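Every NodeNotReady heartbeat and every "Error syncing pod" in this window bottoms out in the same condition: there is no CNI configuration file in /etc/kubernetes/cni/net.d/, and there will not be one until the cluster's network plugin writes it. A minimal check from the node, assuming filesystem access (directory path copied from the kubelet message):

    import glob
    import os

    CNI_DIR = "/etc/kubernetes/cni/net.d"  # directory named in the log
    files = sorted(glob.glob(os.path.join(CNI_DIR, "*")))
    if files:
        for f in files:
            print(f, os.path.getsize(f), "bytes")
    else:
        print(f"{CNI_DIR} is empty; the runtime stays NetworkReady=false and pod "
              f"sandboxes cannot be created")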
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.907993 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.907707 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.908164 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.938348 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.938619 5037 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 14:16:09 crc kubenswrapper[5037]: E1126 14:16:09.938740 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs podName:b18a6f09-7a1e-4965-81e2-dde847147b41 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:10.93871442 +0000 UTC m=+37.735484604 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs") pod "network-metrics-daemon-wjch9" (UID: "b18a6f09-7a1e-4965-81e2-dde847147b41") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.965818 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.965869 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.965881 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.965898 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:09 crc kubenswrapper[5037]: I1126 14:16:09.965910 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:09Z","lastTransitionTime":"2025-11-26T14:16:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.069052 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.069086 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.069099 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.069116 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.069131 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.100533 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.100613 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.100626 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.100648 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.100661 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:10 crc kubenswrapper[5037]: E1126 14:16:10.113160 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:10Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.117039 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.117081 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
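Beyond the webhook failure, the rejected node patch above is a compact picture of the node's resources: capacity is what the machine has, allocatable is what is left for pods after the kubelet's reservations and eviction thresholds. The gap can be computed straight from the quantities in the patch; a minimal sketch whose parser handles only the suffix forms that actually occur there (plain, m, Ki):

    def parse_quantity(s: str) -> float:
        # Only the forms present in this patch: "12", "11800m", "32865356Ki", "76396645454".
        if s.endswith("m"):
            return int(s[:-1]) / 1000.0
        if s.endswith("Ki"):
            return int(s[:-2]) * 1024.0
        return float(s)

    capacity    = {"cpu": "12",     "memory": "32865356Ki", "ephemeral-storage": "83293888Ki"}
    allocatable = {"cpu": "11800m", "memory": "32404556Ki", "ephemeral-storage": "76396645454"}

    for res in capacity:
        reserved = parse_quantity(capacity[res]) - parse_quantity(allocatable[res])
        if res == "cpu":
            print(f"{res}: {reserved:.1f} cores reserved")
        else:
            print(f"{res}: {reserved / 2**20:.0f} MiB reserved")

Run against the values in the log this yields 0.2 cores, 450 MiB of memory, and roughly 8.3 GiB of ephemeral storage held back from pods (note the mixed units in the original patch: memory in Ki, allocatable ephemeral-storage in plain bytes).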
event="NodeHasNoDiskPressure" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.117093 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.117111 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.117127 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:10 crc kubenswrapper[5037]: E1126 14:16:10.128346 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:10Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.132786 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.132856 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
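This is the second time the identical node-status patch has been built and rejected within about 15ms (attempts at 14:16:10.113 and 14:16:10.128, with a third to follow), which matches the kubelet's pattern of a few immediate retries per sync before waiting for the next status interval. A minimal sketch of that shape; the attempt count echoes the kubelet's historical nodeStatusUpdateRetry constant and the 10s pause its default nodeStatusUpdateFrequency, but both are illustrative here, and patch_node_status is a hypothetical stand-in for the PATCH call the webhook is rejecting:

    import time

    def patch_node_status() -> bool:
        # Hypothetical stand-in: in this log the real PATCH is rejected by the
        # node.network-node-identity.openshift.io admission webhook.
        return False

    def try_update_node_status(attempts: int = 5) -> bool:
        for i in range(attempts):
            if patch_node_status():
                return True
            print(f"attempt {i + 1} failed, will retry")
        return False

    if not try_update_node_status():
        time.sleep(10)  # next sync period rebuilds and resends the patch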
event="NodeHasNoDiskPressure" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.132873 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.132897 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.132910 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:10 crc kubenswrapper[5037]: E1126 14:16:10.147523 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:10Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.151347 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.151383 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.151395 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.151410 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.151422 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:10 crc kubenswrapper[5037]: E1126 14:16:10.163963 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:10Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.167912 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.167947 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.167957 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.167972 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.167981 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:10 crc kubenswrapper[5037]: E1126 14:16:10.179853 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:10Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:10 crc kubenswrapper[5037]: E1126 14:16:10.179982 5037 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.181562 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.181619 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.181633 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.181653 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.181668 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.284976 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.285024 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.285033 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.285052 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.285063 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.387997 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.388036 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.388045 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.388061 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.388072 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.490647 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.490681 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.490691 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.490706 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.490715 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.593317 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.593367 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.593381 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.593401 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.593413 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.695482 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.695539 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.695555 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.695580 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.695598 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.798192 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.798242 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.798255 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.798279 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.798326 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.901143 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.901186 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.901202 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.901218 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.901229 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:10Z","lastTransitionTime":"2025-11-26T14:16:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.907807 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9"
Nov 26 14:16:10 crc kubenswrapper[5037]: E1126 14:16:10.907952 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41"
Nov 26 14:16:10 crc kubenswrapper[5037]: I1126 14:16:10.949003 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9"
Nov 26 14:16:10 crc kubenswrapper[5037]: E1126 14:16:10.949247 5037 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 14:16:10 crc kubenswrapper[5037]: E1126 14:16:10.949409 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs podName:b18a6f09-7a1e-4965-81e2-dde847147b41 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:12.949377762 +0000 UTC m=+39.746148166 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs") pod "network-metrics-daemon-wjch9" (UID: "b18a6f09-7a1e-4965-81e2-dde847147b41") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.003861 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.003901 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.003913 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.003935 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.003949 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:11Z","lastTransitionTime":"2025-11-26T14:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.106780 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.106839 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.106850 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.106870 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.106885 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:11Z","lastTransitionTime":"2025-11-26T14:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.210824 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.210880 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.210897 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.210923 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.210941 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:11Z","lastTransitionTime":"2025-11-26T14:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.312840 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.312874 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.312883 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.312900 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.312912 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:11Z","lastTransitionTime":"2025-11-26T14:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.415828 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.415910 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.415924 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.415943 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.415956 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:11Z","lastTransitionTime":"2025-11-26T14:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.520442 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.520486 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.520497 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.520515 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.520527 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:11Z","lastTransitionTime":"2025-11-26T14:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.623985 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.624033 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.624044 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.624063 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.624077 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:11Z","lastTransitionTime":"2025-11-26T14:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.727668 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.727954 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.727962 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.727978 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.727988 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:11Z","lastTransitionTime":"2025-11-26T14:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.830634 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.830679 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.830689 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.830705 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.830715 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:11Z","lastTransitionTime":"2025-11-26T14:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.907486 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.907545 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.907588 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 14:16:11 crc kubenswrapper[5037]: E1126 14:16:11.907631 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 14:16:11 crc kubenswrapper[5037]: E1126 14:16:11.907786 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 14:16:11 crc kubenswrapper[5037]: E1126 14:16:11.907922 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.933891 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.933931 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.933940 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.933959 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:11 crc kubenswrapper[5037]: I1126 14:16:11.933969 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:11Z","lastTransitionTime":"2025-11-26T14:16:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.040479 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.040531 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.040543 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.040562 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.040574 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:12Z","lastTransitionTime":"2025-11-26T14:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.143432 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.143495 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.143505 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.143526 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.143542 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:12Z","lastTransitionTime":"2025-11-26T14:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.247095 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.247153 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.247166 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.247195 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.247208 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:12Z","lastTransitionTime":"2025-11-26T14:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.349766 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.349818 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.349829 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.349847 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.349862 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:12Z","lastTransitionTime":"2025-11-26T14:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.452337 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.452376 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.452384 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.452400 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.452412 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:12Z","lastTransitionTime":"2025-11-26T14:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.554970 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.555008 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.555016 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.555030 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.555043 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:12Z","lastTransitionTime":"2025-11-26T14:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.657846 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.657888 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.657900 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.657917 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.657929 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:12Z","lastTransitionTime":"2025-11-26T14:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.760725 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.760765 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.760776 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.760793 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.760807 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:12Z","lastTransitionTime":"2025-11-26T14:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.863949 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.864008 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.864017 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.864036 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.864047 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:12Z","lastTransitionTime":"2025-11-26T14:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.907602 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:12 crc kubenswrapper[5037]: E1126 14:16:12.907806 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.967260 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.967350 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.967362 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.967383 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.967407 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:12Z","lastTransitionTime":"2025-11-26T14:16:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:12 crc kubenswrapper[5037]: I1126 14:16:12.973985 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:12 crc kubenswrapper[5037]: E1126 14:16:12.974192 5037 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 14:16:12 crc kubenswrapper[5037]: E1126 14:16:12.974320 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs podName:b18a6f09-7a1e-4965-81e2-dde847147b41 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:16.974261702 +0000 UTC m=+43.771031886 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs") pod "network-metrics-daemon-wjch9" (UID: "b18a6f09-7a1e-4965-81e2-dde847147b41") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.069416 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.069460 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.069470 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.069488 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.069501 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:13Z","lastTransitionTime":"2025-11-26T14:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.171946 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.171989 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.172000 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.172016 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.172029 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:13Z","lastTransitionTime":"2025-11-26T14:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.274209 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.274249 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.274258 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.274274 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.274306 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:13Z","lastTransitionTime":"2025-11-26T14:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.376710 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.376759 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.376770 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.376787 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.376801 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:13Z","lastTransitionTime":"2025-11-26T14:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.479224 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.479280 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.479310 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.479327 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.479339 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:13Z","lastTransitionTime":"2025-11-26T14:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.581484 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.581545 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.581560 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.581583 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.581602 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:13Z","lastTransitionTime":"2025-11-26T14:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.684225 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.684317 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.684329 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.684346 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.684360 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:13Z","lastTransitionTime":"2025-11-26T14:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.787423 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.787467 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.787477 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.787499 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.787513 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:13Z","lastTransitionTime":"2025-11-26T14:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.889676 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.889728 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.889737 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.889755 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.889767 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:13Z","lastTransitionTime":"2025-11-26T14:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.907168 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.907254 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.907374 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:13 crc kubenswrapper[5037]: E1126 14:16:13.907466 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:13 crc kubenswrapper[5037]: E1126 14:16:13.907501 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:13 crc kubenswrapper[5037]: E1126 14:16:13.907549 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.919794 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:13Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.934146 5037 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:13Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.959572 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:13Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.977412 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:13Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.991927 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:13Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.992722 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.992777 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.992794 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.992818 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:13 crc kubenswrapper[5037]: I1126 14:16:13.992835 5037 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:13Z","lastTransitionTime":"2025-11-26T14:16:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.005570 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:14Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.018741 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:14Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.034654 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:14Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.057641 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://374f3356b5dd72f14cd7dceb298e27429ba58d7fae836a8c08c06501d2694060\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:05Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:05.281339 6327 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:05.281362 6327 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 14:16:05.281384 6327 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 14:16:05.281392 6327 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:05.281411 6327 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 14:16:05.281419 6327 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 14:16:05.281442 6327 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:05.281451 6327 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 14:16:05.281455 6327 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 14:16:05.281458 6327 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 14:16:05.281471 6327 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:05.281494 6327 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 14:16:05.281515 6327 factory.go:656] Stopping watch factory\\\\nI1126 14:16:05.281534 6327 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:05.281532 6327 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:07Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.168\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 14:16:07.186947 6506 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.187053 6506 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1126 14:16:07.187061 6506 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1126 14:16:07.187067 6506 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.186795 6506 services_controller.go:451] Built service default/kubernetes cluster-wide LB for network=default: []services.LB{}\\\\nF1126 14:16:07.187078 6506 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62
c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:14Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.075658 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:14Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.088392 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:14Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.096080 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.096135 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.096147 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.096166 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.096178 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:14Z","lastTransitionTime":"2025-11-26T14:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.099014 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:14Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.109679 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:14Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.123315 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:14Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.136837 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:14Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.148774 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:14Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.197881 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.197920 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.197928 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.197944 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.197954 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:14Z","lastTransitionTime":"2025-11-26T14:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.301250 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.301328 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.301341 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.301386 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.301404 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:14Z","lastTransitionTime":"2025-11-26T14:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.403830 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.403881 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.403892 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.403909 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.403922 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:14Z","lastTransitionTime":"2025-11-26T14:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.507148 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.507195 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.507210 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.507228 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.507239 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:14Z","lastTransitionTime":"2025-11-26T14:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.609948 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.609986 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.609997 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.610015 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.610029 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:14Z","lastTransitionTime":"2025-11-26T14:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.713434 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.713488 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.713501 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.713524 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.713536 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:14Z","lastTransitionTime":"2025-11-26T14:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.816229 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.816279 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.816319 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.816337 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.816348 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:14Z","lastTransitionTime":"2025-11-26T14:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.907133 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:14 crc kubenswrapper[5037]: E1126 14:16:14.907307 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.919023 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.919084 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.919096 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.919116 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:14 crc kubenswrapper[5037]: I1126 14:16:14.919130 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:14Z","lastTransitionTime":"2025-11-26T14:16:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.021552 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.021600 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.021613 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.021631 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.021644 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:15Z","lastTransitionTime":"2025-11-26T14:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.124215 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.124250 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.124259 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.124274 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.124301 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:15Z","lastTransitionTime":"2025-11-26T14:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.226731 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.226781 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.226797 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.226815 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.226827 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:15Z","lastTransitionTime":"2025-11-26T14:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.330182 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.330269 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.330313 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.330337 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.330357 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:15Z","lastTransitionTime":"2025-11-26T14:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.433147 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.433193 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.433204 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.433221 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.433236 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:15Z","lastTransitionTime":"2025-11-26T14:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.536232 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.536273 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.536316 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.536335 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.536346 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:15Z","lastTransitionTime":"2025-11-26T14:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.638985 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.639037 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.639048 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.639069 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.639082 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:15Z","lastTransitionTime":"2025-11-26T14:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.742064 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.742130 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.742144 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.742164 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.742179 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:15Z","lastTransitionTime":"2025-11-26T14:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.845173 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.845245 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.845263 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.845317 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.845332 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:15Z","lastTransitionTime":"2025-11-26T14:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.907582 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.907666 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.907635 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:15 crc kubenswrapper[5037]: E1126 14:16:15.907782 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:15 crc kubenswrapper[5037]: E1126 14:16:15.907935 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:15 crc kubenswrapper[5037]: E1126 14:16:15.908110 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.948547 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.948605 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.948621 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.948644 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:15 crc kubenswrapper[5037]: I1126 14:16:15.948657 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:15Z","lastTransitionTime":"2025-11-26T14:16:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.051784 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.051835 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.051849 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.051871 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.051883 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:16Z","lastTransitionTime":"2025-11-26T14:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.156017 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.156338 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.156406 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.156487 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.156555 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:16Z","lastTransitionTime":"2025-11-26T14:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.259279 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.259359 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.259371 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.259402 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.259415 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:16Z","lastTransitionTime":"2025-11-26T14:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.362318 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.362362 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.362371 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.362387 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.362397 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:16Z","lastTransitionTime":"2025-11-26T14:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.464773 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.465133 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.465206 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.465321 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.465424 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:16Z","lastTransitionTime":"2025-11-26T14:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.568121 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.568161 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.568170 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.568187 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.568197 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:16Z","lastTransitionTime":"2025-11-26T14:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.670435 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.670490 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.670500 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.670517 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.670527 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:16Z","lastTransitionTime":"2025-11-26T14:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.773279 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.773351 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.773366 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.773386 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.773401 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:16Z","lastTransitionTime":"2025-11-26T14:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.876685 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.876973 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.877096 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.877170 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.877236 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:16Z","lastTransitionTime":"2025-11-26T14:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.908016 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:16 crc kubenswrapper[5037]: E1126 14:16:16.908206 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.980448 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.980498 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.980507 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.980523 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:16 crc kubenswrapper[5037]: I1126 14:16:16.980535 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:16Z","lastTransitionTime":"2025-11-26T14:16:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.015209 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:17 crc kubenswrapper[5037]: E1126 14:16:17.015398 5037 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 14:16:17 crc kubenswrapper[5037]: E1126 14:16:17.015486 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs podName:b18a6f09-7a1e-4965-81e2-dde847147b41 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:25.015463372 +0000 UTC m=+51.812233556 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs") pod "network-metrics-daemon-wjch9" (UID: "b18a6f09-7a1e-4965-81e2-dde847147b41") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.083612 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.083675 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.083689 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.083711 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.083726 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:17Z","lastTransitionTime":"2025-11-26T14:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.187158 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.187209 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.187220 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.187239 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.187251 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:17Z","lastTransitionTime":"2025-11-26T14:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.290404 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.290446 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.290455 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.290470 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.290479 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:17Z","lastTransitionTime":"2025-11-26T14:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.393327 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.393371 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.393380 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.393395 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.393405 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:17Z","lastTransitionTime":"2025-11-26T14:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.496375 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.496768 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.496890 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.496986 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.497060 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:17Z","lastTransitionTime":"2025-11-26T14:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.599702 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.599746 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.599756 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.599790 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.599801 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:17Z","lastTransitionTime":"2025-11-26T14:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.702990 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.703063 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.703080 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.703105 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.703123 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:17Z","lastTransitionTime":"2025-11-26T14:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.805766 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.805807 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.805816 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.805833 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.805842 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:17Z","lastTransitionTime":"2025-11-26T14:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.907714 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.907851 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:17 crc kubenswrapper[5037]: E1126 14:16:17.907963 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.908043 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:17 crc kubenswrapper[5037]: E1126 14:16:17.908132 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:17 crc kubenswrapper[5037]: E1126 14:16:17.908213 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.908944 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.909022 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.909032 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.909053 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:17 crc kubenswrapper[5037]: I1126 14:16:17.909064 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:17Z","lastTransitionTime":"2025-11-26T14:16:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.012326 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.012379 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.012389 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.012404 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.012414 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:18Z","lastTransitionTime":"2025-11-26T14:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.115524 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.115577 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.115593 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.115614 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.115625 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:18Z","lastTransitionTime":"2025-11-26T14:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.219203 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.219273 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.219320 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.219348 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.219367 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:18Z","lastTransitionTime":"2025-11-26T14:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.323506 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.324004 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.324263 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.324424 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.324521 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:18Z","lastTransitionTime":"2025-11-26T14:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.428444 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.428519 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.428537 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.428565 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.428585 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:18Z","lastTransitionTime":"2025-11-26T14:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.531528 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.531599 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.531613 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.531635 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.531651 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:18Z","lastTransitionTime":"2025-11-26T14:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.635358 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.635413 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.635425 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.635444 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.635459 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:18Z","lastTransitionTime":"2025-11-26T14:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.742140 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.742181 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.742189 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.742206 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.742216 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:18Z","lastTransitionTime":"2025-11-26T14:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.845772 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.845825 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.845835 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.845851 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.845865 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:18Z","lastTransitionTime":"2025-11-26T14:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.908077 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:18 crc kubenswrapper[5037]: E1126 14:16:18.908557 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.908613 5037 scope.go:117] "RemoveContainer" containerID="929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.925444 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\
\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:18Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.949231 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.949660 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.949676 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.949695 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.949710 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:18Z","lastTransitionTime":"2025-11-26T14:16:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.950675 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:07Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.168\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 14:16:07.186947 6506 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.187053 6506 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1126 14:16:07.187061 6506 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1126 14:16:07.187067 6506 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.186795 6506 services_controller.go:451] Built service default/kubernetes cluster-wide LB for network=default: []services.LB{}\\\\nF1126 14:16:07.187078 6506 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s 
restarting failed container=ovnkube-controller pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:18Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.968642 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a
91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" 
(2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:18Z is after 
2025-08-24T17:21:41Z" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.982591 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:18Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:18 crc kubenswrapper[5037]: I1126 14:16:18.995750 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:18Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.007061 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.018365 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.036433 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.048711 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.052931 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.052969 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.052981 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.053002 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.053015 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:19Z","lastTransitionTime":"2025-11-26T14:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.063719 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resour
ces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.076856 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.086617 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.097023 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedA
t\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.110520 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.129378 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.142447 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\
\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.155766 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.155835 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.155852 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.155878 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.155897 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:19Z","lastTransitionTime":"2025-11-26T14:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.259386 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.259447 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.259461 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.259482 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.259496 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:19Z","lastTransitionTime":"2025-11-26T14:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.342417 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/1.log" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.346745 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerStarted","Data":"68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3"} Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.347455 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.362700 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.362771 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.362790 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.362817 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.362835 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:19Z","lastTransitionTime":"2025-11-26T14:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.373420 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.390248 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.404249 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.420072 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.459051 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMoun
ts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"}
,{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:07Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.168\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 14:16:07.186947 6506 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.187053 6506 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1126 14:16:07.187061 6506 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1126 14:16:07.187067 6506 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.186795 6506 services_controller.go:451] Built service default/kubernetes cluster-wide LB for 
network=default: []services.LB{}\\\\nF1126 14:16:07.187078 6506 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\
\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.466225 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.466275 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.466313 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.466335 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.466348 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:19Z","lastTransitionTime":"2025-11-26T14:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.485681 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.504255 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.524966 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.547400 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.566679 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.568386 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.568427 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.568439 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.568456 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.568470 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:19Z","lastTransitionTime":"2025-11-26T14:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.583353 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.596444 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 
14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.627428 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.645800 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.664183 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.672006 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.672065 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:19 crc 
kubenswrapper[5037]: I1126 14:16:19.672079 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.672101 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.672115 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:19Z","lastTransitionTime":"2025-11-26T14:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.682160 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"n
ame\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:19Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.775123 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.775181 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.775196 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.775218 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.775231 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:19Z","lastTransitionTime":"2025-11-26T14:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.878441 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.878506 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.878521 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.878541 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.878556 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:19Z","lastTransitionTime":"2025-11-26T14:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.907923 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.907987 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.907934 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:19 crc kubenswrapper[5037]: E1126 14:16:19.908152 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:19 crc kubenswrapper[5037]: E1126 14:16:19.908340 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:19 crc kubenswrapper[5037]: E1126 14:16:19.908432 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.981598 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.981653 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.981666 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.981686 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:19 crc kubenswrapper[5037]: I1126 14:16:19.981700 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:19Z","lastTransitionTime":"2025-11-26T14:16:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.085312 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.085365 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.085376 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.085394 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.085405 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.188453 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.188518 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.188532 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.188559 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.188573 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.292258 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.292340 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.292358 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.292383 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.292403 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.353423 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/2.log" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.354213 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/1.log" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.357312 5037 generic.go:334] "Generic (PLEG): container finished" podID="454ee6da-70e5-4d30-89e5-19a35123a278" containerID="68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3" exitCode=1 Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.357376 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.357441 5037 scope.go:117] "RemoveContainer" containerID="929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.359019 5037 scope.go:117] "RemoveContainer" containerID="68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3" Nov 26 14:16:20 crc kubenswrapper[5037]: E1126 14:16:20.359181 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.388823 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.395459 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.395533 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.395557 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.395591 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.395617 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.403222 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.415540 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.429542 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.445713 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.461892 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.475642 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.490682 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedA
t\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.499034 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.499070 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.499082 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.499101 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.499114 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.504359 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.523826 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.541011 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\
\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.554477 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.554527 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.554545 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.554571 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.554590 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.562896 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: E1126 14:16:20.571682 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.576799 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.576864 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.576878 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.576903 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.576919 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.580332 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: E1126 14:16:20.592996 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4
d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.595952 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.597964 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.598022 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.598036 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" 
Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.598060 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.598072 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.610665 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servic
eaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: E1126 14:16:20.613264 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 
2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.617191 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.617247 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.617261 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.617297 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.617314 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.633233 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef5
5dc27cc479637ddc9fe360e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://929743b7e95801b01bb311843648e9294f63d4fbd44eb677511e88a13d62889e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:07Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.168\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 14:16:07.186947 6506 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.187053 6506 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1126 14:16:07.187061 6506 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1126 14:16:07.187067 6506 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1126 14:16:07.186795 6506 services_controller.go:451] Built service default/kubernetes cluster-wide LB for network=default: []services.LB{}\\\\nF1126 14:16:07.187078 6506 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:19Z\\\",\\\"message\\\":\\\"for services for network=default\\\\nI1126 14:16:19.926961 6706 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927047 6706 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927368 6706 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927492 6706 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927856 6706 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:19.927911 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:19.927973 6706 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:19.927987 6706 factory.go:656] Stopping watch factory\\\\nI1126 14:16:19.928004 6706 handler.go:208] Removed *v1.Node 
event handler 2\\\\nI1126 14:16:19.928574 6706 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:19.928626 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 14:16:19.928724 6706 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip
\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: E1126 14:16:20.633581 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 
2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.638769 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.638813 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.638823 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.638842 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.638856 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: E1126 14:16:20.652639 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:20Z is after 
2025-08-24T17:21:41Z" Nov 26 14:16:20 crc kubenswrapper[5037]: E1126 14:16:20.652806 5037 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.654975 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.655027 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.655037 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.655058 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.655073 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.758504 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.758551 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.758562 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.758578 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.758590 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.861526 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.861589 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.861609 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.861634 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.861652 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.908057 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:20 crc kubenswrapper[5037]: E1126 14:16:20.908204 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.966329 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.966407 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.966431 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.966461 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:20 crc kubenswrapper[5037]: I1126 14:16:20.966480 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:20Z","lastTransitionTime":"2025-11-26T14:16:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.070133 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.070212 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.070231 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.070254 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.070358 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:21Z","lastTransitionTime":"2025-11-26T14:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.178891 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.178975 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.179041 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.179067 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.179086 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:21Z","lastTransitionTime":"2025-11-26T14:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.283842 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.283928 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.283948 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.283977 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.284000 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:21Z","lastTransitionTime":"2025-11-26T14:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.364179 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/2.log" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.370173 5037 scope.go:117] "RemoveContainer" containerID="68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3" Nov 26 14:16:21 crc kubenswrapper[5037]: E1126 14:16:21.370421 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.387181 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.387235 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.387330 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.387355 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.387366 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:21Z","lastTransitionTime":"2025-11-26T14:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.391140 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.405601 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.419779 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.433447 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.448521 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.461911 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.475590 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.489813 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedA
t\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.490165 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.490207 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.490221 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.490252 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.490264 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:21Z","lastTransitionTime":"2025-11-26T14:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.500852 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.515431 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.528697 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\
\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.541682 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d746
2\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI 
cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.553421 5037 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.565977 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.578577 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.592990 5037 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.593028 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.593041 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.593060 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.593072 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:21Z","lastTransitionTime":"2025-11-26T14:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.598589 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef5
5dc27cc479637ddc9fe360e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:19Z\\\",\\\"message\\\":\\\"for services for network=default\\\\nI1126 14:16:19.926961 6706 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927047 6706 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927368 6706 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927492 6706 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927856 6706 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:19.927911 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:19.927973 6706 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:19.927987 6706 factory.go:656] Stopping watch factory\\\\nI1126 14:16:19.928004 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:19.928574 6706 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:19.928626 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 14:16:19.928724 6706 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:21Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.696468 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.696562 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.696579 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.696600 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.696615 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:21Z","lastTransitionTime":"2025-11-26T14:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.798736 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.798795 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.798809 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.798831 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.798846 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:21Z","lastTransitionTime":"2025-11-26T14:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.902445 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.902512 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.902524 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.902574 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.902593 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:21Z","lastTransitionTime":"2025-11-26T14:16:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.907914 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.908008 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 14:16:21 crc kubenswrapper[5037]: I1126 14:16:21.907917 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 14:16:21 crc kubenswrapper[5037]: E1126 14:16:21.908157 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 14:16:21 crc kubenswrapper[5037]: E1126 14:16:21.908452 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 14:16:21 crc kubenswrapper[5037]: E1126 14:16:21.908365 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.006477 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.006548 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.006573 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.006604 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.006625 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:22Z","lastTransitionTime":"2025-11-26T14:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.109822 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.109896 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.109919 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.109953 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.109978 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:22Z","lastTransitionTime":"2025-11-26T14:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.213585 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.213653 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.213669 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.213693 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.213717 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:22Z","lastTransitionTime":"2025-11-26T14:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.316882 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.316951 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.316963 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.316980 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.317042 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:22Z","lastTransitionTime":"2025-11-26T14:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.420743 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.420834 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.420855 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.420889 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.420908 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:22Z","lastTransitionTime":"2025-11-26T14:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.525472 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.525565 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.525588 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.525624 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.525654 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:22Z","lastTransitionTime":"2025-11-26T14:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.629275 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.629430 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.629445 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.629465 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.629480 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:22Z","lastTransitionTime":"2025-11-26T14:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.732361 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.732435 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.732446 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.732462 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.732474 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:22Z","lastTransitionTime":"2025-11-26T14:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.834788 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.834847 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.834861 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.834881 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.834894 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:22Z","lastTransitionTime":"2025-11-26T14:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.907798 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9"
Nov 26 14:16:22 crc kubenswrapper[5037]: E1126 14:16:22.908019 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.938092 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.938122 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.938130 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.938145 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:22 crc kubenswrapper[5037]: I1126 14:16:22.938155 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:22Z","lastTransitionTime":"2025-11-26T14:16:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.041028 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.041116 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.041127 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.041147 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.041160 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:23Z","lastTransitionTime":"2025-11-26T14:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.143744 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.143797 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.143809 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.143830 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.143842 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:23Z","lastTransitionTime":"2025-11-26T14:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.246924 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.246983 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.246998 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.247020 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.247036 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:23Z","lastTransitionTime":"2025-11-26T14:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.349891 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.349937 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.349949 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.349968 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.349981 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:23Z","lastTransitionTime":"2025-11-26T14:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.452735 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.452810 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.452827 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.452858 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.452876 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:23Z","lastTransitionTime":"2025-11-26T14:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.556309 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.556372 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.556387 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.556408 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.556421 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:23Z","lastTransitionTime":"2025-11-26T14:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.659840 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.659886 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.659916 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.659933 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.659948 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:23Z","lastTransitionTime":"2025-11-26T14:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.763787 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.763843 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.763854 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.763876 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.763888 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:23Z","lastTransitionTime":"2025-11-26T14:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.866393 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.866440 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.866449 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.866464 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.866474 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:23Z","lastTransitionTime":"2025-11-26T14:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.907517 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.907621 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.907909 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 14:16:23 crc kubenswrapper[5037]: E1126 14:16:23.907987 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 14:16:23 crc kubenswrapper[5037]: E1126 14:16:23.908075 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 14:16:23 crc kubenswrapper[5037]: E1126 14:16:23.907908 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.927681 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static
-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:23Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.942965 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:23Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.958002 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:23Z is after 2025-08-24T17:21:41Z"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.970189 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.970270 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.970296 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.970316 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.970335 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:23Z","lastTransitionTime":"2025-11-26T14:16:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.973710 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:23Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:23 crc kubenswrapper[5037]: I1126 14:16:23.992159 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:19Z\\\",\\\"message\\\":\\\"for services for network=default\\\\nI1126 14:16:19.926961 6706 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927047 6706 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927368 6706 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927492 6706 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927856 6706 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:19.927911 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:19.927973 6706 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:19.927987 6706 factory.go:656] Stopping watch factory\\\\nI1126 14:16:19.928004 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:19.928574 6706 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:19.928626 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 14:16:19.928724 6706 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:23Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.005361 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:24Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.016679 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:24Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.032909 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:24Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.047221 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:24Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.060354 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:24Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.072861 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.072916 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.072932 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.072952 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.072966 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:24Z","lastTransitionTime":"2025-11-26T14:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.072982 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:24Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.085631 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:24Z is after 2025-08-24T17:21:41Z" Nov 26 
14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.098585 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:24Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.111876 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:24Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.124836 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:24Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.142063 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:24Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.176076 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.176131 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.176143 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.176164 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.176180 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:24Z","lastTransitionTime":"2025-11-26T14:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.278707 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.278767 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.278778 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.278796 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.278810 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:24Z","lastTransitionTime":"2025-11-26T14:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.381186 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.381256 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.381278 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.381338 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.381353 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:24Z","lastTransitionTime":"2025-11-26T14:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.485569 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.485654 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.485678 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.485713 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.485738 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:24Z","lastTransitionTime":"2025-11-26T14:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.589729 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.589831 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.589857 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.589892 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.589916 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:24Z","lastTransitionTime":"2025-11-26T14:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.693342 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.693419 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.693453 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.693485 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.693519 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:24Z","lastTransitionTime":"2025-11-26T14:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.797458 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.797536 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.797554 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.797581 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.797598 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:24Z","lastTransitionTime":"2025-11-26T14:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.901983 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.902057 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.902076 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.902103 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.902122 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:24Z","lastTransitionTime":"2025-11-26T14:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:24 crc kubenswrapper[5037]: I1126 14:16:24.907143 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:24 crc kubenswrapper[5037]: E1126 14:16:24.907408 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.005365 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.005430 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.005439 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.005457 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.005468 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:25Z","lastTransitionTime":"2025-11-26T14:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.108927 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.108973 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.108983 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.109000 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.109010 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:25Z","lastTransitionTime":"2025-11-26T14:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.114610 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.114767 5037 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.114848 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs podName:b18a6f09-7a1e-4965-81e2-dde847147b41 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:41.114827373 +0000 UTC m=+67.911597567 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs") pod "network-metrics-daemon-wjch9" (UID: "b18a6f09-7a1e-4965-81e2-dde847147b41") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.212806 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.212860 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.212871 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.212887 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.212902 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:25Z","lastTransitionTime":"2025-11-26T14:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.315187 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.315228 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.315237 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.315254 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.315272 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:25Z","lastTransitionTime":"2025-11-26T14:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.417924 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.417971 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.417982 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.418000 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.418014 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:25Z","lastTransitionTime":"2025-11-26T14:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.465802 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.479073 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.482518 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.498055 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.515411 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.519991 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.520023 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:25 crc 
kubenswrapper[5037]: I1126 14:16:25.520033 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.520050 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.520060 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:25Z","lastTransitionTime":"2025-11-26T14:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.530343 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"n
ame\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.542991 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserv
er-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" 
certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.555045 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.565700 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.582711 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.611409 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:19Z\\\",\\\"message\\\":\\\"for services for network=default\\\\nI1126 14:16:19.926961 6706 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927047 6706 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927368 6706 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927492 6706 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927856 6706 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:19.927911 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:19.927973 6706 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:19.927987 6706 factory.go:656] Stopping watch factory\\\\nI1126 14:16:19.928004 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:19.928574 6706 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:19.928626 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 14:16:19.928724 6706 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.623333 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.623403 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.623426 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.623453 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.623471 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:25Z","lastTransitionTime":"2025-11-26T14:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.632035 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.643806 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.655465 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.672217 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.686914 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.702307 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.718224 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:25Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.726255 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.726327 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.726340 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.726361 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.726375 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:25Z","lastTransitionTime":"2025-11-26T14:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.821373 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.821562 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.821598 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.821652 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:16:57.821604138 +0000 UTC m=+84.618374362 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.821693 5037 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.821728 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.821744 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:57.821733991 +0000 UTC m=+84.618504175 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.821831 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.821870 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.821941 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.821967 5037 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.822024 5037 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.822066 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.822158 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.822192 5037 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.822089 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:57.822058959 +0000 UTC m=+84.618829173 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.822438 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:57.822351576 +0000 UTC m=+84.619121770 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.822480 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 14:16:57.822468589 +0000 UTC m=+84.619238783 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.830943 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.831139 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.831179 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.831258 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.831343 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:25Z","lastTransitionTime":"2025-11-26T14:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.907241 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.907253 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.907457 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.907501 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.907686 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:25 crc kubenswrapper[5037]: E1126 14:16:25.907877 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.935857 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.935935 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.935955 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.935979 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:25 crc kubenswrapper[5037]: I1126 14:16:25.935997 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:25Z","lastTransitionTime":"2025-11-26T14:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.039041 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.039087 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.039099 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.039119 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.039131 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:26Z","lastTransitionTime":"2025-11-26T14:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.143815 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.143861 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.143875 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.143895 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.143910 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:26Z","lastTransitionTime":"2025-11-26T14:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.246912 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.247000 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.247026 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.247059 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.247082 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:26Z","lastTransitionTime":"2025-11-26T14:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.350742 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.350809 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.350828 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.350858 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.350879 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:26Z","lastTransitionTime":"2025-11-26T14:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.454048 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.454114 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.454139 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.454170 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.454194 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:26Z","lastTransitionTime":"2025-11-26T14:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.557345 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.557428 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.557452 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.557492 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.557516 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:26Z","lastTransitionTime":"2025-11-26T14:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.660984 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.661051 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.661069 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.661098 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.661119 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:26Z","lastTransitionTime":"2025-11-26T14:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.765090 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.765156 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.765178 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.765205 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.765225 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:26Z","lastTransitionTime":"2025-11-26T14:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.868545 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.868605 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.868617 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.868640 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.868652 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:26Z","lastTransitionTime":"2025-11-26T14:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.907846 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:26 crc kubenswrapper[5037]: E1126 14:16:26.908105 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.971654 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.971770 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.971798 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.971836 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:26 crc kubenswrapper[5037]: I1126 14:16:26.971864 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:26Z","lastTransitionTime":"2025-11-26T14:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.075073 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.075145 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.075163 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.075190 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.075208 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:27Z","lastTransitionTime":"2025-11-26T14:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.178093 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.178158 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.178190 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.178218 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.178246 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:27Z","lastTransitionTime":"2025-11-26T14:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.282107 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.282172 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.282186 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.282208 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.282228 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:27Z","lastTransitionTime":"2025-11-26T14:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.386107 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.386148 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.386161 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.386177 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.386189 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:27Z","lastTransitionTime":"2025-11-26T14:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.489478 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.489560 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.489580 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.489608 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.489630 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:27Z","lastTransitionTime":"2025-11-26T14:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.593455 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.593537 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.593560 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.593587 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.593607 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:27Z","lastTransitionTime":"2025-11-26T14:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.696913 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.696987 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.697004 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.697029 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.697047 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:27Z","lastTransitionTime":"2025-11-26T14:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.800727 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.800791 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.800815 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.800849 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.800874 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:27Z","lastTransitionTime":"2025-11-26T14:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.904092 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.904168 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.904189 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.904218 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.904238 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:27Z","lastTransitionTime":"2025-11-26T14:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.907573 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.907653 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:27 crc kubenswrapper[5037]: E1126 14:16:27.907800 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:27 crc kubenswrapper[5037]: I1126 14:16:27.907603 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:27 crc kubenswrapper[5037]: E1126 14:16:27.908038 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:27 crc kubenswrapper[5037]: E1126 14:16:27.908195 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.007240 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.007321 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.007336 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.007357 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.007369 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:28Z","lastTransitionTime":"2025-11-26T14:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.110833 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.110895 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.110906 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.110930 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.110943 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:28Z","lastTransitionTime":"2025-11-26T14:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.213796 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.213868 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.213887 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.213920 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.213940 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:28Z","lastTransitionTime":"2025-11-26T14:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.316824 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.317093 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.317188 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.317321 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.317403 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:28Z","lastTransitionTime":"2025-11-26T14:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.420533 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.420956 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.421140 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.421271 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.421451 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:28Z","lastTransitionTime":"2025-11-26T14:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.524453 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.524877 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.525029 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.525198 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.525382 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:28Z","lastTransitionTime":"2025-11-26T14:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.629848 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.629923 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.629949 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.629983 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.630008 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:28Z","lastTransitionTime":"2025-11-26T14:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.734423 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.734475 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.734490 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.734508 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.734519 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:28Z","lastTransitionTime":"2025-11-26T14:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.837344 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.837420 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.837437 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.837464 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.837486 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:28Z","lastTransitionTime":"2025-11-26T14:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.907452 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:28 crc kubenswrapper[5037]: E1126 14:16:28.907650 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.940962 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.941028 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.941045 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.941073 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:28 crc kubenswrapper[5037]: I1126 14:16:28.941092 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:28Z","lastTransitionTime":"2025-11-26T14:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.044378 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.044464 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.044480 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.044500 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.044516 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:29Z","lastTransitionTime":"2025-11-26T14:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.148798 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.148861 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.148888 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.148913 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.148931 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:29Z","lastTransitionTime":"2025-11-26T14:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.251496 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.251848 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.251859 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.251880 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.251898 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:29Z","lastTransitionTime":"2025-11-26T14:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.354753 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.354815 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.354828 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.354848 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.354865 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:29Z","lastTransitionTime":"2025-11-26T14:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.457836 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.457897 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.457908 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.457927 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.457940 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:29Z","lastTransitionTime":"2025-11-26T14:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.561739 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.561812 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.561824 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.561845 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.561856 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:29Z","lastTransitionTime":"2025-11-26T14:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.664330 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.664373 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.664382 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.664399 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.664412 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:29Z","lastTransitionTime":"2025-11-26T14:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.766917 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.766948 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.766956 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.766972 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.766982 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:29Z","lastTransitionTime":"2025-11-26T14:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.870333 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.870383 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.870395 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.870417 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.870434 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:29Z","lastTransitionTime":"2025-11-26T14:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.908070 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.908070 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.908248 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:29 crc kubenswrapper[5037]: E1126 14:16:29.908536 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:29 crc kubenswrapper[5037]: E1126 14:16:29.908674 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:29 crc kubenswrapper[5037]: E1126 14:16:29.908819 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.973569 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.973615 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.973627 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.973647 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:29 crc kubenswrapper[5037]: I1126 14:16:29.973659 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:29Z","lastTransitionTime":"2025-11-26T14:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.076065 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.076096 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.076105 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.076121 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.076134 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:30Z","lastTransitionTime":"2025-11-26T14:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.178589 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.178634 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.178644 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.178660 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.178675 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:30Z","lastTransitionTime":"2025-11-26T14:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.280933 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.280978 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.280988 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.281003 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.281014 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:30Z","lastTransitionTime":"2025-11-26T14:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.383662 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.383706 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.383719 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.383737 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.383753 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:30Z","lastTransitionTime":"2025-11-26T14:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.486905 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.486953 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.486962 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.486979 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.486991 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:30Z","lastTransitionTime":"2025-11-26T14:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.589864 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.589937 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.589948 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.589969 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.589984 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:30Z","lastTransitionTime":"2025-11-26T14:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.693401 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.693472 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.693496 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.693525 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.693545 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:30Z","lastTransitionTime":"2025-11-26T14:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.796937 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.796989 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.797002 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.797027 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.797042 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:30Z","lastTransitionTime":"2025-11-26T14:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.900160 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.900268 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.900329 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.900362 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.900388 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:30Z","lastTransitionTime":"2025-11-26T14:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:30 crc kubenswrapper[5037]: I1126 14:16:30.907800 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:30 crc kubenswrapper[5037]: E1126 14:16:30.908026 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.004757 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.004829 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.004843 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.004887 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.004900 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.019075 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.019178 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.019246 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.019281 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.019358 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:31 crc kubenswrapper[5037]: E1126 14:16:31.038772 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:31Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.045081 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.045160 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.045185 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.045215 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.045235 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:31 crc kubenswrapper[5037]: E1126 14:16:31.059648 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:31Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.065738 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.065790 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.065801 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.065822 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.065840 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:31 crc kubenswrapper[5037]: E1126 14:16:31.086427 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:31Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.092016 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.092507 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
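Every retry of the status patch shown above fails for the one reason recorded at the end of the error text: the serving certificate of the "node.network-node-identity.openshift.io" webhook at https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-11-26T14:16:31Z. A minimal Go sketch for confirming that window from the node itself follows; the address is taken from the log, everything else (file name, output format) is illustrative only:

    // certcheck.go - a minimal sketch for inspecting the webhook certificate
    // whose expiry is reported in the log above. Assumes the webhook is still
    // serving on 127.0.0.1:9743 as the log shows; adjust for other setups.
    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"log"
    	"time"
    )

    func main() {
    	// InsecureSkipVerify is deliberate: verification failing is the very
    	// symptom under study, and we still want to read the certificate.
    	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
    	if err != nil {
    		log.Fatalf("dial: %v", err)
    	}
    	defer conn.Close()

    	cert := conn.ConnectionState().PeerCertificates[0]
    	now := time.Now()
    	fmt.Printf("subject:   %s\n", cert.Subject)
    	fmt.Printf("notBefore: %s\n", cert.NotBefore.Format(time.RFC3339))
    	fmt.Printf("notAfter:  %s\n", cert.NotAfter.Format(time.RFC3339))
    	fmt.Printf("expired:   %v (now=%s)\n", now.After(cert.NotAfter), now.Format(time.RFC3339))
    }

On the node above this would be expected to print a notAfter of 2025-08-24T17:21:41Z and expired: true, matching the x509 error the kubelet keeps hitting.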
event="NodeHasNoDiskPressure" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.092659 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.092813 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.092956 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:31 crc kubenswrapper[5037]: E1126 14:16:31.108884 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:31Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.113337 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.113383 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
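The body the kubelet is trying to PATCH is a strategic-merge patch over Node.status: $setElementOrder/conditions pins the ordering of the four conditions, and each pressure condition is healthy when its status is "False" (which is why reason KubeletHasSufficientMemory appears next to status "False" for MemoryPressure). A short sketch decoding one condition taken verbatim (unescaped) from the payload above makes that reading explicit; the struct here is a hypothetical subset of the real NodeCondition type, not the upstream definition:

    // condition.go - a minimal sketch decoding one node condition of the kind
    // quoted (triple-escaped) in the failed patch above.
    package main

    import (
    	"encoding/json"
    	"fmt"
    	"log"
    )

    // NodeCondition mirrors only the fields present in the logged payload.
    type NodeCondition struct {
    	Type               string `json:"type"`
    	Status             string `json:"status"`
    	Reason             string `json:"reason"`
    	Message            string `json:"message"`
    	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
    	LastTransitionTime string `json:"lastTransitionTime"`
    }

    func main() {
    	raw := `{"lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","message":"kubelet has sufficient memory available","reason":"KubeletHasSufficientMemory","status":"False","type":"MemoryPressure"}`
    	var c NodeCondition
    	if err := json.Unmarshal([]byte(raw), &c); err != nil {
    		log.Fatal(err)
    	}
    	// MemoryPressure=False means the node is NOT under memory pressure,
    	// so "sufficient" in the reason is consistent with status "False".
    	fmt.Printf("%s=%s (%s: %s)\n", c.Type, c.Status, c.Reason, c.Message)
    }

Only the Ready condition in the payload is actually unhealthy (status "False" with reason KubeletNotReady), and its message names the missing CNI configuration as the cause.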
event="NodeHasNoDiskPressure" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.113402 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.113427 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.113444 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:31 crc kubenswrapper[5037]: E1126 14:16:31.128904 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:31Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:31 crc kubenswrapper[5037]: E1126 14:16:31.129146 5037 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.134026 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.134092 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.134107 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.134129 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.134143 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.238059 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.238163 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.238217 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.238248 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.238350 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.345323 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.345368 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.345381 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.345399 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.345412 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.448716    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.448755    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.448766    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.448782    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.448794    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.552004    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.552109    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.552129    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.552161    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.552180    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.655339    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.655382    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.655390    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.655406    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.655417    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.758708    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.758781    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.758797    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.758821    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.758836    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.861772    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.861856    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.861876    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.861906    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.861926    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.907583    5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.907670    5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.907541    5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 14:16:31 crc kubenswrapper[5037]: E1126 14:16:31.908522    5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 14:16:31 crc kubenswrapper[5037]: E1126 14:16:31.907858    5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 14:16:31 crc kubenswrapper[5037]: E1126 14:16:31.908659    5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.965466    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.965528    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.965546    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.965567    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:31 crc kubenswrapper[5037]: I1126 14:16:31.965587    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:31Z","lastTransitionTime":"2025-11-26T14:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.069549    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.069631    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.069655    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.069688    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.069709    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:32Z","lastTransitionTime":"2025-11-26T14:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.174235    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.174360    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.174374    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.174400    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.174418    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:32Z","lastTransitionTime":"2025-11-26T14:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.278242    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.278338    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.278350    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.278373    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.278386    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:32Z","lastTransitionTime":"2025-11-26T14:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.382736    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.382789    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.382803    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.382827    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.382841    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:32Z","lastTransitionTime":"2025-11-26T14:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.486393    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.486435    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.486446    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.486461    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.486473    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:32Z","lastTransitionTime":"2025-11-26T14:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.588589    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.588630    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.588647    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.588668    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.588680    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:32Z","lastTransitionTime":"2025-11-26T14:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.690918    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.691281    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.691377    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.691453    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.691535    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:32Z","lastTransitionTime":"2025-11-26T14:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.795413    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.795456    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.795467    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.795486    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.795500    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:32Z","lastTransitionTime":"2025-11-26T14:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.898830    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.898906    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.898959    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.898988    5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.899007    5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:32Z","lastTransitionTime":"2025-11-26T14:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:32 crc kubenswrapper[5037]: I1126 14:16:32.908112    5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9"
Nov 26 14:16:32 crc kubenswrapper[5037]: E1126 14:16:32.908361    5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41"
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.002507 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.002561 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.002570 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.002590 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.002604 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:33Z","lastTransitionTime":"2025-11-26T14:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.106073 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.107322 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.107520 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.107686 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.107824 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:33Z","lastTransitionTime":"2025-11-26T14:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.212089 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.212176 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.212199 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.212231 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.212250 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:33Z","lastTransitionTime":"2025-11-26T14:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.314865 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.314905 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.314914 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.314929 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.314941 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:33Z","lastTransitionTime":"2025-11-26T14:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.417330 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.417416 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.417439 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.417468 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.417487 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:33Z","lastTransitionTime":"2025-11-26T14:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.521349 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.521406 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.521417 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.521436 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.521448 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:33Z","lastTransitionTime":"2025-11-26T14:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.623836 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.623883 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.623896 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.623914 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.623928 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:33Z","lastTransitionTime":"2025-11-26T14:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.727729 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.727808 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.727836 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.727865 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:33 crc kubenswrapper[5037]: I1126 14:16:33.727883 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:33Z","lastTransitionTime":"2025-11-26T14:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.183692 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.183743 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.183752 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.183769 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.183780 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:34Z","lastTransitionTime":"2025-11-26T14:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.184335 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.184356 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.184433 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:34 crc kubenswrapper[5037]: E1126 14:16:34.184438 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.185083 5037 scope.go:117] "RemoveContainer" containerID="68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3" Nov 26 14:16:34 crc kubenswrapper[5037]: E1126 14:16:34.185375 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" Nov 26 14:16:34 crc kubenswrapper[5037]: E1126 14:16:34.185527 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:34 crc kubenswrapper[5037]: E1126 14:16:34.185594 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.233710 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac
0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.253417 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.269267 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.286612 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.286678 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.286691 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.286712 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.286726 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:34Z","lastTransitionTime":"2025-11-26T14:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.287729    5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z"
Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.302085    5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.334445 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.356564 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe
91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:19Z\\\",\\\"message\\\":\\\"for services for network=default\\\\nI1126 14:16:19.926961 6706 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927047 6706 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927368 6706 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927492 6706 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927856 6706 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:19.927911 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:19.927973 6706 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:19.927987 6706 factory.go:656] Stopping watch factory\\\\nI1126 14:16:19.928004 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:19.928574 6706 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:19.928626 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 14:16:19.928724 6706 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.372439 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.389465 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.389503 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.389515 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.389534 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.389548 5037 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:34Z","lastTransitionTime":"2025-11-26T14:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.389544 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.403352 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.415855 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.427205 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.439465 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd1172bb-f8ba-452e-9438-1e4e064466fb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b728895e4ecd26bbe5587512878f5dfb72643d07acc38dccecdf55d9369d1811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75e4e0ccd9d317e18bd7f97c06cdc5d2bcb53c2de228f3619c894d964304770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30b88b73e6299d048160f3e7b1698df43e63aa1dc98e86f8472bc47994019f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.452647 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 
2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.465080 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.474025 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:34Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.493138 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.493185 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.493194 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.493215 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.493226 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:34Z","lastTransitionTime":"2025-11-26T14:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.597103 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.597187 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.597213 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.597248 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.597272 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:34Z","lastTransitionTime":"2025-11-26T14:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.700273 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.700355 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.700372 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.700397 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.700413 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:34Z","lastTransitionTime":"2025-11-26T14:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.803564 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.803611 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.803622 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.803643 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.803657 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:34Z","lastTransitionTime":"2025-11-26T14:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.906579 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.906630 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.906656 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.906685 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.906711 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:34Z","lastTransitionTime":"2025-11-26T14:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:34 crc kubenswrapper[5037]: I1126 14:16:34.907177 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:34 crc kubenswrapper[5037]: E1126 14:16:34.907362 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.010085 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.010168 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.010191 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.010239 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.010269 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:35Z","lastTransitionTime":"2025-11-26T14:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.114580 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.114627 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.114663 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.114684 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.114695 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:35Z","lastTransitionTime":"2025-11-26T14:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.217722 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.217761 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.217770 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.217789 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.217801 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:35Z","lastTransitionTime":"2025-11-26T14:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.320928 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.321155 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.321178 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.321197 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.321209 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:35Z","lastTransitionTime":"2025-11-26T14:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.423836 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.423926 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.423936 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.423954 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.423966 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:35Z","lastTransitionTime":"2025-11-26T14:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.528166 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.528224 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.528234 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.528254 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.528265 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:35Z","lastTransitionTime":"2025-11-26T14:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.631600 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.631729 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.631796 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.631827 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.631896 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:35Z","lastTransitionTime":"2025-11-26T14:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.736906 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.736964 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.736982 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.737013 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.737034 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:35Z","lastTransitionTime":"2025-11-26T14:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.841137 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.841197 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.841207 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.841228 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.841240 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:35Z","lastTransitionTime":"2025-11-26T14:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.907756 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.907873 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.907757 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:35 crc kubenswrapper[5037]: E1126 14:16:35.908034 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:35 crc kubenswrapper[5037]: E1126 14:16:35.908141 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:35 crc kubenswrapper[5037]: E1126 14:16:35.908251 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.944472 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.944522 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.944533 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.944553 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:35 crc kubenswrapper[5037]: I1126 14:16:35.944563 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:35Z","lastTransitionTime":"2025-11-26T14:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.048046 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.048110 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.048127 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.048154 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.048172 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:36Z","lastTransitionTime":"2025-11-26T14:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.153408 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.153459 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.153473 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.153494 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.153510 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:36Z","lastTransitionTime":"2025-11-26T14:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.256408 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.256460 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.256472 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.256492 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.256504 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:36Z","lastTransitionTime":"2025-11-26T14:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.360118 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.360187 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.360236 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.360260 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.360316 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:36Z","lastTransitionTime":"2025-11-26T14:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.470833 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.470926 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.470954 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.470988 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.471014 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:36Z","lastTransitionTime":"2025-11-26T14:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.573927 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.573985 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.573997 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.574020 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.574033 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:36Z","lastTransitionTime":"2025-11-26T14:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.677629 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.677757 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.677826 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.677866 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.677940 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:36Z","lastTransitionTime":"2025-11-26T14:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.780134 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.780173 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.780187 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.780204 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.780213 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:36Z","lastTransitionTime":"2025-11-26T14:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.882552 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.882586 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.882596 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.882613 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.882623 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:36Z","lastTransitionTime":"2025-11-26T14:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.907152 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:36 crc kubenswrapper[5037]: E1126 14:16:36.907435 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.986222 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.986308 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.986335 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.986358 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:36 crc kubenswrapper[5037]: I1126 14:16:36.986375 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:36Z","lastTransitionTime":"2025-11-26T14:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.089729 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.089780 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.089796 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.089818 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.089831 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:37Z","lastTransitionTime":"2025-11-26T14:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.194326 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.194389 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.194403 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.194453 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.194476 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:37Z","lastTransitionTime":"2025-11-26T14:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.297799 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.297833 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.297843 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.297857 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.297866 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:37Z","lastTransitionTime":"2025-11-26T14:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.401404 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.401504 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.401519 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.401542 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.401559 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:37Z","lastTransitionTime":"2025-11-26T14:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.504939 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.504988 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.505000 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.505024 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.505034 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:37Z","lastTransitionTime":"2025-11-26T14:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.608528 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.608608 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.608633 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.608672 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.608699 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:37Z","lastTransitionTime":"2025-11-26T14:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.712315 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.712394 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.712413 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.712443 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.712462 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:37Z","lastTransitionTime":"2025-11-26T14:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.816045 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.816095 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.816105 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.816127 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.816141 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:37Z","lastTransitionTime":"2025-11-26T14:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.908075 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.908126 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.908076 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:37 crc kubenswrapper[5037]: E1126 14:16:37.908227 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:37 crc kubenswrapper[5037]: E1126 14:16:37.908338 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:37 crc kubenswrapper[5037]: E1126 14:16:37.908560 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.918949 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.918997 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.919009 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.919026 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:37 crc kubenswrapper[5037]: I1126 14:16:37.919048 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:37Z","lastTransitionTime":"2025-11-26T14:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.022086 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.022143 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.022156 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.022180 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.022195 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:38Z","lastTransitionTime":"2025-11-26T14:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.124922 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.124994 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.125009 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.125039 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.125058 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:38Z","lastTransitionTime":"2025-11-26T14:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.228344 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.228392 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.228405 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.228427 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.228442 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:38Z","lastTransitionTime":"2025-11-26T14:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.331195 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.331254 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.331265 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.331311 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.331326 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:38Z","lastTransitionTime":"2025-11-26T14:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.434494 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.434666 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.434683 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.434706 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.434721 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:38Z","lastTransitionTime":"2025-11-26T14:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.539059 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.539097 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.539107 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.539122 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.539132 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:38Z","lastTransitionTime":"2025-11-26T14:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.642353 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.642392 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.642404 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.642424 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.642437 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:38Z","lastTransitionTime":"2025-11-26T14:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.744605 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.744641 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.744650 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.744665 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.744677 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:38Z","lastTransitionTime":"2025-11-26T14:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.847542 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.847596 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.847609 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.847627 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.847639 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:38Z","lastTransitionTime":"2025-11-26T14:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.908047 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:38 crc kubenswrapper[5037]: E1126 14:16:38.908215 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.949863 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.949925 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.949938 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.949957 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:38 crc kubenswrapper[5037]: I1126 14:16:38.949971 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:38Z","lastTransitionTime":"2025-11-26T14:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.053783 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.053862 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.053902 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.053934 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.053955 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:39Z","lastTransitionTime":"2025-11-26T14:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.156672 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.156736 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.156747 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.156766 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.156783 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:39Z","lastTransitionTime":"2025-11-26T14:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.260518 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.260577 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.260588 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.260610 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.260623 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:39Z","lastTransitionTime":"2025-11-26T14:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.362872 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.362940 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.362951 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.362967 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.362977 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:39Z","lastTransitionTime":"2025-11-26T14:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.466557 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.466625 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.466638 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.466660 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.466679 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:39Z","lastTransitionTime":"2025-11-26T14:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.569769 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.569813 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.569822 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.569840 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.569854 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:39Z","lastTransitionTime":"2025-11-26T14:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.673040 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.673097 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.673109 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.673132 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.673145 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:39Z","lastTransitionTime":"2025-11-26T14:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.775922 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.775960 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.775969 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.775985 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.775995 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:39Z","lastTransitionTime":"2025-11-26T14:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.879341 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.879408 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.879422 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.879444 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.879459 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:39Z","lastTransitionTime":"2025-11-26T14:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.907704 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.907704 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.907851 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:39 crc kubenswrapper[5037]: E1126 14:16:39.907996 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:39 crc kubenswrapper[5037]: E1126 14:16:39.908103 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:39 crc kubenswrapper[5037]: E1126 14:16:39.908190 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.982957 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.983032 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.983045 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.983065 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:39 crc kubenswrapper[5037]: I1126 14:16:39.983077 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:39Z","lastTransitionTime":"2025-11-26T14:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.086237 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.086306 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.086319 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.086337 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.086350 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:40Z","lastTransitionTime":"2025-11-26T14:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.191262 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.191598 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.191619 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.191645 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.191662 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:40Z","lastTransitionTime":"2025-11-26T14:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.294972 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.295062 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.295082 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.295115 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.295135 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:40Z","lastTransitionTime":"2025-11-26T14:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.398076 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.398130 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.398142 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.398166 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.398180 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:40Z","lastTransitionTime":"2025-11-26T14:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.501351 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.501405 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.501418 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.501440 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.501455 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:40Z","lastTransitionTime":"2025-11-26T14:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.604240 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.604314 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.604326 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.604347 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.604359 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:40Z","lastTransitionTime":"2025-11-26T14:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.707444 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.707506 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.707520 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.707547 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.707564 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:40Z","lastTransitionTime":"2025-11-26T14:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.810553 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.810618 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.810639 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.810665 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.810679 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:40Z","lastTransitionTime":"2025-11-26T14:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.907549 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:40 crc kubenswrapper[5037]: E1126 14:16:40.907726 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.913494 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.913535 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.913546 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.913562 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:40 crc kubenswrapper[5037]: I1126 14:16:40.913571 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:40Z","lastTransitionTime":"2025-11-26T14:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.016045 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.016102 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.016112 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.016132 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.016146 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.119431 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.119467 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.119477 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.119492 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.119505 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.153095 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:41 crc kubenswrapper[5037]: E1126 14:16:41.153367 5037 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 14:16:41 crc kubenswrapper[5037]: E1126 14:16:41.153503 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs podName:b18a6f09-7a1e-4965-81e2-dde847147b41 nodeName:}" failed. No retries permitted until 2025-11-26 14:17:13.153473912 +0000 UTC m=+99.950244096 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs") pod "network-metrics-daemon-wjch9" (UID: "b18a6f09-7a1e-4965-81e2-dde847147b41") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.221839 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.221903 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.221920 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.221945 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.221964 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.325045 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.325110 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.325124 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.325146 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.325161 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.428162 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.428202 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.428216 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.428234 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.428246 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.446708 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.446772 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.446796 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.446826 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.446853 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: E1126 14:16:41.465333 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:41Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.470343 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.470471 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.470543 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.470622 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.470696 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: E1126 14:16:41.483065 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:41Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.486495 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.486527 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.486540 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.486559 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.486570 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: E1126 14:16:41.500207 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:41Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.504445 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.504597 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.504681 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.504785 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.504856 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: E1126 14:16:41.516937 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:41Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.520569 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.520604 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.520617 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.520638 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.520650 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: E1126 14:16:41.534767 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:41Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:41 crc kubenswrapper[5037]: E1126 14:16:41.534982 5037 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.537049 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.537075 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.537088 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.537106 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.537318 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.639563 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.639602 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.639615 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.639632 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.639643 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.742037 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.742074 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.742087 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.742104 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.742116 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.844664 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.844720 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.844733 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.844751 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.844764 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.908093 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:41 crc kubenswrapper[5037]: E1126 14:16:41.908359 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.908416 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.908633 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:41 crc kubenswrapper[5037]: E1126 14:16:41.908678 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:41 crc kubenswrapper[5037]: E1126 14:16:41.908875 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.947559 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.947599 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.947608 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.947629 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:41 crc kubenswrapper[5037]: I1126 14:16:41.947646 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:41Z","lastTransitionTime":"2025-11-26T14:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.050621 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.051100 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.051253 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.051443 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.051695 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:42Z","lastTransitionTime":"2025-11-26T14:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.154809 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.155306 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.155506 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.155661 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.155821 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:42Z","lastTransitionTime":"2025-11-26T14:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.258456 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.258850 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.258937 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.259016 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.259086 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:42Z","lastTransitionTime":"2025-11-26T14:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.362263 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.362355 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.362367 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.362388 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.362401 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:42Z","lastTransitionTime":"2025-11-26T14:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.465511 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.465567 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.465578 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.465597 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.465936 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:42Z","lastTransitionTime":"2025-11-26T14:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.568346 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.568410 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.568426 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.568447 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.568730 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:42Z","lastTransitionTime":"2025-11-26T14:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.674676 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.675530 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.676115 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.676192 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.676253 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:42Z","lastTransitionTime":"2025-11-26T14:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.779398 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.779457 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.779476 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.779504 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.779519 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:42Z","lastTransitionTime":"2025-11-26T14:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.882784 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.882833 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.882848 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.882866 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.882883 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:42Z","lastTransitionTime":"2025-11-26T14:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.907204 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:42 crc kubenswrapper[5037]: E1126 14:16:42.907391 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.985251 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.985414 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.985426 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.985474 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:42 crc kubenswrapper[5037]: I1126 14:16:42.985485 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:42Z","lastTransitionTime":"2025-11-26T14:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.088601 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.090731 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.090805 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.090890 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.090976 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:43Z","lastTransitionTime":"2025-11-26T14:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.194202 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.194589 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.194665 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.194736 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.194802 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:43Z","lastTransitionTime":"2025-11-26T14:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.297604 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.297684 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.297702 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.297725 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.297738 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:43Z","lastTransitionTime":"2025-11-26T14:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.401191 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.401265 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.401279 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.401323 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.401342 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:43Z","lastTransitionTime":"2025-11-26T14:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.456714 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-lxpjp_490e7d88-ae7f-45f9-ab12-598c33e3bc69/kube-multus/0.log" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.456778 5037 generic.go:334] "Generic (PLEG): container finished" podID="490e7d88-ae7f-45f9-ab12-598c33e3bc69" containerID="a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2" exitCode=1 Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.456815 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-lxpjp" event={"ID":"490e7d88-ae7f-45f9-ab12-598c33e3bc69","Type":"ContainerDied","Data":"a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2"} Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.457281 5037 scope.go:117] "RemoveContainer" containerID="a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.484501 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.499348 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd1172bb-f8ba-452e-9438-1e4e064466fb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b728895e4ecd26bbe5587512878f5dfb72643d07acc38dccecdf55d9369d1811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75e4e0ccd9d317e18bd7f97c06cdc5d2bcb53c2de228f3619c894d964304770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30b88b73e6299d048160f3e7b1698df43e63aa1dc98e86f8472bc47994019f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.504354 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.504400 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.504411 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.504431 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.504443 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:43Z","lastTransitionTime":"2025-11-26T14:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.516065 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.530715 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.543534 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.559012 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.572194 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.587045 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.607072 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedA
t\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.607379 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.607405 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.607415 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.607436 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.607448 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:43Z","lastTransitionTime":"2025-11-26T14:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.619553 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.634463 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.650927 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:43Z\\\",\\\"message\\\":\\\"2025-11-26T14:15:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a\\\\n2025-11-26T14:15:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a to /host/opt/cni/bin/\\\\n2025-11-26T14:15:57Z [verbose] multus-daemon started\\\\n2025-11-26T14:15:57Z [verbose] Readiness Indicator file check\\\\n2025-11-26T14:16:42Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.671478 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:19Z\\\",\\\"message\\\":\\\"for services for network=default\\\\nI1126 14:16:19.926961 6706 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927047 6706 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927368 6706 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927492 6706 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927856 6706 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:19.927911 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:19.927973 6706 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:19.927987 6706 factory.go:656] Stopping watch factory\\\\nI1126 14:16:19.928004 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:19.928574 6706 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:19.928626 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 14:16:19.928724 6706 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.688093 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 
+0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc 
kubenswrapper[5037]: I1126 14:16:43.701672 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.710908 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.710950 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.710964 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.710979 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.710989 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:43Z","lastTransitionTime":"2025-11-26T14:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.714215 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.726754 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.814485 5037 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.814543 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.814560 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.814585 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.814601 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:43Z","lastTransitionTime":"2025-11-26T14:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.908468 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:43 crc kubenswrapper[5037]: E1126 14:16:43.908674 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.908736 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.908760 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:43 crc kubenswrapper[5037]: E1126 14:16:43.908870 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:43 crc kubenswrapper[5037]: E1126 14:16:43.909121 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.917648 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.917714 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.917737 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.917767 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.917794 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:43Z","lastTransitionTime":"2025-11-26T14:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.928477 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee
1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.947912 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.963557 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 
14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.976788 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:43 crc kubenswrapper[5037]: I1126 14:16:43.991788 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:43Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.007330 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.021887 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.021943 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:44 crc 
kubenswrapper[5037]: I1126 14:16:44.021957 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.021978 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.021992 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:44Z","lastTransitionTime":"2025-11-26T14:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.023634 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:43Z\\\",\\\"message\\\":\\\"2025-11-26T14:15:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a\\\\n2025-11-26T14:15:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a to /host/opt/cni/bin/\\\\n2025-11-26T14:15:57Z [verbose] multus-daemon started\\\\n2025-11-26T14:15:57Z [verbose] Readiness Indicator file check\\\\n2025-11-26T14:16:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.043639 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:19Z\\\",\\\"message\\\":\\\"for services for network=default\\\\nI1126 14:16:19.926961 6706 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927047 6706 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927368 6706 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927492 6706 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927856 6706 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:19.927911 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:19.927973 6706 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:19.927987 6706 factory.go:656] Stopping watch factory\\\\nI1126 14:16:19.928004 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:19.928574 6706 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:19.928626 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 14:16:19.928724 6706 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.064256 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 
+0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc 
kubenswrapper[5037]: I1126 14:16:44.077252 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.090920 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.101893 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.114142 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.124935 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.124977 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 
14:16:44.124993 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.125012 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.125025 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:44Z","lastTransitionTime":"2025-11-26T14:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.127781 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd1172bb-f8ba-452e-9438-1e4e064466fb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b728895e4ecd26bbe5587512878f5dfb72643d07acc38dccecdf55d9369d1811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75e4e0ccd9d317e18bd7f97c06cdc5d2bcb53c2de228f3619c894d964304770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30b88b73e6299d0481
60f3e7b1698df43e63aa1dc98e86f8472bc47994019f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.141524 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.154240 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.169209 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.228017 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.228074 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.228087 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.228110 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.228124 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:44Z","lastTransitionTime":"2025-11-26T14:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.331104 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.331149 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.331162 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.331180 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.331191 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:44Z","lastTransitionTime":"2025-11-26T14:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.434097 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.434139 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.434157 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.434176 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.434188 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:44Z","lastTransitionTime":"2025-11-26T14:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.464127 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-lxpjp_490e7d88-ae7f-45f9-ab12-598c33e3bc69/kube-multus/0.log" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.464175 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-lxpjp" event={"ID":"490e7d88-ae7f-45f9-ab12-598c33e3bc69","Type":"ContainerStarted","Data":"58232632cfc8ddcd9e524acaf4b195314aeed89c0c6f892596b6020a82de4d38"} Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.479007 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\
\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.493610 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.507656 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.522373 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.537025 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.537065 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.537075 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.537091 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.537101 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:44Z","lastTransitionTime":"2025-11-26T14:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.553919 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef5
5dc27cc479637ddc9fe360e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:19Z\\\",\\\"message\\\":\\\"for services for network=default\\\\nI1126 14:16:19.926961 6706 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927047 6706 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927368 6706 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927492 6706 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927856 6706 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:19.927911 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:19.927973 6706 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:19.927987 6706 factory.go:656] Stopping watch factory\\\\nI1126 14:16:19.928004 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:19.928574 6706 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:19.928626 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 14:16:19.928724 6706 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.569384 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd1172bb-f8ba-452e-9438-1e4e064466fb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b728895e4ecd26bbe5587512878f5dfb72643d07acc38dccecdf55d9369d1811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75e4e0ccd9d317e18bd7f97c06cdc5d2bcb53c2de228f3619c894d964304770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30b88b73e6299d048160f3e7b1698df43e63aa1dc98e86f8472bc47994019f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.587599 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.599825 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.612057 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.622119 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.631802 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.639713 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.639755 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.639767 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.639785 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.639797 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:44Z","lastTransitionTime":"2025-11-26T14:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.642780 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.653076 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 
14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.669402 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.681316 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.696920 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.708782 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58232632cfc8ddcd9e524acaf4b195314aeed89c0c6f892596b6020a82de4d38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:43Z\\\",\\\"message\\\":\\\"2025-11-26T14:15:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a\\\\n2025-11-26T14:15:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a to /host/opt/cni/bin/\\\\n2025-11-26T14:15:57Z [verbose] multus-daemon started\\\\n2025-11-26T14:15:57Z [verbose] Readiness Indicator file check\\\\n2025-11-26T14:16:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:44Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.741902 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.741953 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.741970 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.741998 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.742022 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:44Z","lastTransitionTime":"2025-11-26T14:16:44Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:44 crc kubenswrapper[5037]: I1126 14:16:44.907874 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9"
Nov 26 14:16:44 crc kubenswrapper[5037]: E1126 14:16:44.908072 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41"
Nov 26 14:16:45 crc kubenswrapper[5037]: I1126 14:16:45.907853 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 14:16:45 crc kubenswrapper[5037]: I1126 14:16:45.907920 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 14:16:45 crc kubenswrapper[5037]: I1126 14:16:45.907999 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 14:16:45 crc kubenswrapper[5037]: E1126 14:16:45.908195 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 14:16:45 crc kubenswrapper[5037]: E1126 14:16:45.908281 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 14:16:45 crc kubenswrapper[5037]: E1126 14:16:45.908385 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 14:16:46 crc kubenswrapper[5037]: I1126 14:16:46.907730 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9"
Nov 26 14:16:46 crc kubenswrapper[5037]: E1126 14:16:46.907908 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41"
Nov 26 14:16:46 crc kubenswrapper[5037]: I1126 14:16:46.908717 5037 scope.go:117] "RemoveContainer" containerID="68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3"
Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.341899 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.341952 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.341964 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.341986 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.342000 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:47Z","lastTransitionTime":"2025-11-26T14:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.444538 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.444601 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.444621 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.444651 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.444703 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:47Z","lastTransitionTime":"2025-11-26T14:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.479384 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/2.log" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.483772 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerStarted","Data":"7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c"} Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.484260 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.503506 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd1172bb-f8ba-452e-9438-1e4e064466fb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b728895e4ecd26bbe5587512878f5dfb72643d07acc38dccecdf55d9369d1811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75e4e0ccd9d317e18bd7f97c06cdc5d2bcb53c2de228f3619c894d964304770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30b88b73e6299d048160f3e7b1698df43e63aa1dc98e86f8472bc47994019f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.534129 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 
2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.546357 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.547707 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.547753 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.547771 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.547790 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.547804 5037 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:47Z","lastTransitionTime":"2025-11-26T14:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.561670 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.576081 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.590535 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.603531 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.618436 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.635792 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedA
t\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.651055 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.651098 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.651108 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.651125 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.651138 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:47Z","lastTransitionTime":"2025-11-26T14:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.652845 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.674073 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.695766 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58232632cfc8ddcd9e524acaf4b195314aeed89c0c6f892596b6020a82de4d38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:43Z\\\",\\\"message\\\":\\\"2025-11-26T14:15:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a\\\\n2025-11-26T14:15:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a to /host/opt/cni/bin/\\\\n2025-11-26T14:15:57Z [verbose] multus-daemon started\\\\n2025-11-26T14:15:57Z [verbose] Readiness Indicator file check\\\\n2025-11-26T14:16:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.722892 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.745653 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.753712 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.753746 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.753756 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.753774 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.753785 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:47Z","lastTransitionTime":"2025-11-26T14:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.773472 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.788891 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.807748 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:19Z\\\",\\\"message\\\":\\\"for services for network=default\\\\nI1126 14:16:19.926961 6706 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927047 6706 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927368 6706 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927492 6706 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927856 6706 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:19.927911 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:19.927973 6706 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:19.927987 6706 factory.go:656] Stopping watch factory\\\\nI1126 14:16:19.928004 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:19.928574 6706 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:19.928626 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 14:16:19.928724 6706 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:47Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.857033 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.857089 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.857099 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.857117 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.857131 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:47Z","lastTransitionTime":"2025-11-26T14:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.908270 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.908327 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:47 crc kubenswrapper[5037]: E1126 14:16:47.908498 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.908350 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:47 crc kubenswrapper[5037]: E1126 14:16:47.908587 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:47 crc kubenswrapper[5037]: E1126 14:16:47.908678 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.960282 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.960371 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.960385 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.960414 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:47 crc kubenswrapper[5037]: I1126 14:16:47.960431 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:47Z","lastTransitionTime":"2025-11-26T14:16:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.065353 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.065412 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.065437 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.065462 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.065479 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:48Z","lastTransitionTime":"2025-11-26T14:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.169670 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.169736 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.169750 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.169771 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.169785 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:48Z","lastTransitionTime":"2025-11-26T14:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.273825 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.273931 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.273954 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.274045 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.274067 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:48Z","lastTransitionTime":"2025-11-26T14:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.377172 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.377216 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.377225 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.377256 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.377266 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:48Z","lastTransitionTime":"2025-11-26T14:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.480498 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.480564 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.480584 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.480610 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.480626 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:48Z","lastTransitionTime":"2025-11-26T14:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.489686 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/3.log" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.490398 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/2.log" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.494000 5037 generic.go:334] "Generic (PLEG): container finished" podID="454ee6da-70e5-4d30-89e5-19a35123a278" containerID="7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c" exitCode=1 Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.494053 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c"} Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.494119 5037 scope.go:117] "RemoveContainer" containerID="68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.495393 5037 scope.go:117] "RemoveContainer" containerID="7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c" Nov 26 14:16:48 crc kubenswrapper[5037]: E1126 14:16:48.495660 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.518426 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.532426 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.558147 5037 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68b717ee0e14bec16d53d7823747258f8d3b9ef55dc27cc479637ddc9fe360e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:19Z\\\",\\\"message\\\":\\\"for services for network=default\\\\nI1126 14:16:19.926961 6706 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927047 6706 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927368 6706 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 14:16:19.927492 6706 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 14:16:19.927856 6706 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 14:16:19.927911 6706 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 14:16:19.927973 6706 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 14:16:19.927987 6706 factory.go:656] Stopping watch factory\\\\nI1126 14:16:19.928004 6706 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 14:16:19.928574 6706 ovnkube.go:599] Stopped ovnkube\\\\nI1126 14:16:19.928626 6706 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 14:16:19.928724 6706 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:47Z\\\",\\\"message\\\":\\\":{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 
neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.204:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {78f6184b-c7cf-436d-8cbb-4b31f8af75e8}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 14:16:47.941044 7069 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/iptables-alerter-4ln5h in node crc\\\\nI1126 14:16:47.941055 7069 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/iptables-alerter-4ln5h after 0 failed attempt(s)\\\\nI1126 14:16:47.941061 7069 default_network_controller.go:776] Recording success event on pod openshift-network-operator/iptables-alerter-4ln5h\\\\nF1126 14:16:47.941099 7069 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error oc\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5
ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.573219 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.582953 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.582983 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.582993 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.583008 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.583017 5037 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:48Z","lastTransitionTime":"2025-11-26T14:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.587579 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.599094 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.608330 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.619037 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.631333 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd1172bb-f8ba-452e-9438-1e4e064466fb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b728895e4ecd26bbe5587512878f5dfb72643d07acc38dccecdf55d9369d1811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75e4e0ccd9d317e18bd7f97c06cdc5d2bcb53c2de228f3619c894d964304770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30b88b73e6299d048160f3e7b1698df43e63aa1dc98e86f8472bc47994019f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.648970 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 
2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.666854 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.681319 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.685500 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.685536 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.685545 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.685561 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.685572 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:48Z","lastTransitionTime":"2025-11-26T14:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.694352 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.707758 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58232632cfc8ddcd9e524acaf4b195314aeed89c0c6f892596b6020a82de4d38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:43Z\\\",\\\"message\\\":\\\"2025-11-26T14:15:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a\\\\n2025-11-26T14:15:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a to /host/opt/cni/bin/\\\\n2025-11-26T14:15:57Z [verbose] multus-daemon started\\\\n2025-11-26T14:15:57Z [verbose] Readiness Indicator file check\\\\n2025-11-26T14:16:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.721772 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.735409 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.750686 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:48Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.788511 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.788548 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:48 crc 
kubenswrapper[5037]: I1126 14:16:48.788556 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.788571 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.788581 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:48Z","lastTransitionTime":"2025-11-26T14:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.891395 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.891431 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.891443 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.891460 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.891471 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:48Z","lastTransitionTime":"2025-11-26T14:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.907583 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:48 crc kubenswrapper[5037]: E1126 14:16:48.907709 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.995544 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.995628 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.995656 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.995692 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:48 crc kubenswrapper[5037]: I1126 14:16:48.995718 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:48Z","lastTransitionTime":"2025-11-26T14:16:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.099965 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.100044 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.100064 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.100100 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.100129 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:49Z","lastTransitionTime":"2025-11-26T14:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.203447 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.203497 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.203509 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.203526 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.203539 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:49Z","lastTransitionTime":"2025-11-26T14:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.306611 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.306990 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.307124 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.307239 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.307375 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:49Z","lastTransitionTime":"2025-11-26T14:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.412260 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.412353 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.412374 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.412404 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.412420 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:49Z","lastTransitionTime":"2025-11-26T14:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.500827 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/3.log" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.506758 5037 scope.go:117] "RemoveContainer" containerID="7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c" Nov 26 14:16:49 crc kubenswrapper[5037]: E1126 14:16:49.507108 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.516202 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.516533 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.516659 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.517035 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.517152 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:49Z","lastTransitionTime":"2025-11-26T14:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.527409 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.551233 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.567613 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.583827 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.608679 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMoun
ts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"}
,{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:47Z\\\",\\\"message\\\":\\\":{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.204:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {78f6184b-c7cf-436d-8cbb-4b31f8af75e8}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 14:16:47.941044 7069 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/iptables-alerter-4ln5h in node crc\\\\nI1126 14:16:47.941055 7069 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/iptables-alerter-4ln5h after 0 failed attempt(s)\\\\nI1126 14:16:47.941061 7069 default_network_controller.go:776] Recording success event on pod openshift-network-operator/iptables-alerter-4ln5h\\\\nF1126 14:16:47.941099 7069 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has 
stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error oc\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides
\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.620325 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.620382 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.620395 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.620419 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.620433 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:49Z","lastTransitionTime":"2025-11-26T14:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.629380 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd1172bb-f8ba-452e-9438-1e4e064466fb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b728895e4ecd26bbe5587512878f5dfb72643d07acc38dccecdf55d9369d1811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75e4e0ccd9d317e18bd7f97c06cdc5d2bcb53c2de228f3619c894d964304770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30b88b73e6299d048160f3e7b1698df43e63aa1dc98e86f8472bc47994019f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.650238 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.668220 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.683040 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.697761 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.720375 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.723694 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.723757 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.723768 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.723791 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.723810 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:49Z","lastTransitionTime":"2025-11-26T14:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.738926 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.755832 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 
14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.772276 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.789729 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.816784 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.827803 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.828075 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:49 crc 
kubenswrapper[5037]: I1126 14:16:49.828198 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.828388 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.828529 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:49Z","lastTransitionTime":"2025-11-26T14:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.837343 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58232632cfc8ddcd9e524acaf4b195314aeed89c0c6f892596b6020a82de4d38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:43Z\\\",\\\"message\\\":\\\"2025-11-26T14:15:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a\\\\n2025-11-26T14:15:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a to /host/opt/cni/bin/\\\\n2025-11-26T14:15:57Z [verbose] multus-daemon started\\\\n2025-11-26T14:15:57Z [verbose] Readiness Indicator file check\\\\n2025-11-26T14:16:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:49Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.908171 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.908688 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:49 crc kubenswrapper[5037]: E1126 14:16:49.908843 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:49 crc kubenswrapper[5037]: E1126 14:16:49.909023 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.909509 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:49 crc kubenswrapper[5037]: E1126 14:16:49.909782 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.931620 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.931958 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.932227 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.932555 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:49 crc kubenswrapper[5037]: I1126 14:16:49.932987 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:49Z","lastTransitionTime":"2025-11-26T14:16:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.036398 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.036468 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.036489 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.036517 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.036535 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:50Z","lastTransitionTime":"2025-11-26T14:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.138774 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.139120 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.139212 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.139305 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.139390 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:50Z","lastTransitionTime":"2025-11-26T14:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.242388 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.242757 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.242859 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.242971 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.243087 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:50Z","lastTransitionTime":"2025-11-26T14:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.347435 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.348012 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.348155 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.348321 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.348454 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:50Z","lastTransitionTime":"2025-11-26T14:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.451970 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.452039 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.452057 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.452087 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.452124 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:50Z","lastTransitionTime":"2025-11-26T14:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.556430 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.556500 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.556525 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.556555 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.556579 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:50Z","lastTransitionTime":"2025-11-26T14:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.660165 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.660238 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.660262 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.660416 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.660446 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:50Z","lastTransitionTime":"2025-11-26T14:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.764145 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.764222 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.764245 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.764277 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.764336 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:50Z","lastTransitionTime":"2025-11-26T14:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.868241 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.868365 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.868395 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.868428 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.868451 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:50Z","lastTransitionTime":"2025-11-26T14:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.908381 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:50 crc kubenswrapper[5037]: E1126 14:16:50.908625 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.971226 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.971302 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.971319 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.971339 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:50 crc kubenswrapper[5037]: I1126 14:16:50.971358 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:50Z","lastTransitionTime":"2025-11-26T14:16:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.075544 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.075621 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.075638 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.075666 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.075685 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.181376 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.181465 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.181488 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.181518 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.181537 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.284784 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.284863 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.284879 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.284900 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.284912 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.389176 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.389236 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.389256 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.389281 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.389326 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.493463 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.493540 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.493560 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.493587 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.493605 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.597861 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.597931 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.597951 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.597979 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.598003 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.625682 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.625754 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.625774 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.625799 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.625816 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: E1126 14:16:51.646854 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:51Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.653541 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.653619 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.653638 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.653665 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.653688 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: E1126 14:16:51.675266 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:51Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.681034 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.681105 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.681128 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.681160 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.681181 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: E1126 14:16:51.702330 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:51Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.708611 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.708670 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.708694 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.708725 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.708745 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: E1126 14:16:51.729793 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:51Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.737637 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.738023 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.738226 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.738458 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.738650 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: E1126 14:16:51.763381 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:51Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:51 crc kubenswrapper[5037]: E1126 14:16:51.763564 5037 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.765998 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.766065 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.766083 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.766112 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.766131 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.869504 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.869563 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.869576 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.869597 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.869611 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.907330 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.907412 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.907348 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:51 crc kubenswrapper[5037]: E1126 14:16:51.907492 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:51 crc kubenswrapper[5037]: E1126 14:16:51.907571 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:51 crc kubenswrapper[5037]: E1126 14:16:51.907661 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.972881 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.972931 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.972945 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.972966 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:51 crc kubenswrapper[5037]: I1126 14:16:51.972978 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:51Z","lastTransitionTime":"2025-11-26T14:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.077328 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.077422 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.077454 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.077492 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.077520 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:52Z","lastTransitionTime":"2025-11-26T14:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.180933 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.181075 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.181094 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.181119 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.181137 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:52Z","lastTransitionTime":"2025-11-26T14:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.284922 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.285005 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.285025 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.285056 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.285078 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:52Z","lastTransitionTime":"2025-11-26T14:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.388916 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.388975 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.388989 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.389010 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.389085 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:52Z","lastTransitionTime":"2025-11-26T14:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.492988 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.493030 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.493043 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.493061 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.493076 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:52Z","lastTransitionTime":"2025-11-26T14:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.595497 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.595535 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.595543 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.595557 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.595566 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:52Z","lastTransitionTime":"2025-11-26T14:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.698427 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.698499 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.698518 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.698545 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.698565 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:52Z","lastTransitionTime":"2025-11-26T14:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.801434 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.801499 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.801517 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.801547 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.801569 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:52Z","lastTransitionTime":"2025-11-26T14:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.905421 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.905467 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.905478 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.905497 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.905508 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:52Z","lastTransitionTime":"2025-11-26T14:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:52 crc kubenswrapper[5037]: I1126 14:16:52.908267 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9"
Nov 26 14:16:52 crc kubenswrapper[5037]: E1126 14:16:52.908526 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.009248 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.009381 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.009408 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.009439 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.009461 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:53Z","lastTransitionTime":"2025-11-26T14:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.113262 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.113408 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.113432 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.113461 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.113480 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:53Z","lastTransitionTime":"2025-11-26T14:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.217435 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.217499 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.217522 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.217551 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.217570 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:53Z","lastTransitionTime":"2025-11-26T14:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.322223 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.322377 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.322400 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.322427 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.322445 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:53Z","lastTransitionTime":"2025-11-26T14:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.426662 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.426735 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.426753 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.426782 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.426805 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:53Z","lastTransitionTime":"2025-11-26T14:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.531034 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.531488 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.531504 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.531526 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.531544 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:53Z","lastTransitionTime":"2025-11-26T14:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.636023 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.636084 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.636099 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.636124 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.636139 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:53Z","lastTransitionTime":"2025-11-26T14:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.739248 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.739312 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.739325 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.739346 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.739361 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:53Z","lastTransitionTime":"2025-11-26T14:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.842956 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.843028 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.843041 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.843066 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.843080 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:53Z","lastTransitionTime":"2025-11-26T14:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.907546 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.907625 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.907643 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 14:16:53 crc kubenswrapper[5037]: E1126 14:16:53.907782 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 14:16:53 crc kubenswrapper[5037]: E1126 14:16:53.907883 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 14:16:53 crc kubenswrapper[5037]: E1126 14:16:53.907952 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.920872 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92b37f5a43045595441dda27ecce78e85a7172a9f0b9301b713e4f639388be9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0c3205634aaa1ec9add93ff4da5799da6c5f8702a91abaa5d6b52dfc77a0ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:53Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.938257 5037 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:53Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.945574 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.945615 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.945626 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.945645 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.945659 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:53Z","lastTransitionTime":"2025-11-26T14:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.957892 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3393ec4-cc72-499a-8557-ec6ca329a142\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c5654633d7930074536d4d9b179c36da442f07ef7e7e44c498c38ad51f21c4cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2407f611b1e02cf265ebc0906168af3aae58a7861cb205b2282bc31225f3c301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11
560575e78a651e549c911c786d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab7d7b899f7950be14c801cdcbe825f29cf11560575e78a651e549c911c786d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd489f42ba92c081797158e43944330f92337dde4489308db5b5e0013cddc082\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://778de144ac8d1bdd57658a9e60b8bfed7bed7ac0f2c4520be959d778703e0e6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\
\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7de57b4107807469b30a27d018acc5c3b7568734da012beb4d9ac8e77c7b971c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://953c060c98e0be27a57c09f27cfab5cbfdc11b28f5a12fad6c8201dce6b4fe3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:16:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lw2l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-hn6x5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:53Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.977237 5037 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-multus/multus-lxpjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"490e7d88-ae7f-45f9-ab12-598c33e3bc69\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58232632cfc8ddcd9e524acaf4b195314aeed89c0c6f892596b6020a82de4d38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:43Z\\\",\\\"message\\\":\\\"2025-11-26T14:15:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a\\\\n2025-11-26T14:15:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_a0e93ef9-592c-4dc8-9f70-00f3462cdb7a to /host/opt/cni/bin/\\\\n2025-11-26T14:15:57Z [verbose] multus-daemon started\\\\n2025-11-26T14:15:57Z [verbose] Readiness Indicator file check\\\\n2025-11-26T14:16:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5sbcs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-lxpjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:53Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:53 crc kubenswrapper[5037]: I1126 14:16:53.993983 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"821d0155-28e9-4160-8885-aa8cc1d60197\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"t denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 14:15:53.421172 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 14:15:53.421210 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421215 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 14:15:53.421220 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 14:15:53.421225 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 14:15:53.421228 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 14:15:53.421231 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 14:15:53.421248 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1126 14:15:53.426755 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3728845122/tls.crt::/tmp/serving-cert-3728845122/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764166537\\\\\\\\\\\\\\\" (2025-11-26 14:15:36 +0000 UTC to 2025-12-26 14:15:37 +0000 UTC (now=2025-11-26 14:15:53.426718319 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426903 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764166548\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764166547\\\\\\\\\\\\\\\" (2025-11-26 13:15:47 +0000 UTC to 2026-11-26 13:15:47 +0000 UTC (now=2025-11-26 14:15:53.426883943 +0000 UTC))\\\\\\\"\\\\nI1126 14:15:53.426929 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1126 14:15:53.426951 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nF1126 14:15:53.427030 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:53Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.012396 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:54Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.026676 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d163276317717346ec2c289a779a2784a200c0a4230bbcef92def1d1c55fcab2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:54Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.040593 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bcabca26fad475e5fa46de4c0683cb4671a209cc69dbd1509f933cf799091e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9v8cx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8jk2d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:54Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.048134 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.048203 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.048225 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.048256 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.048282 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:54Z","lastTransitionTime":"2025-11-26T14:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.061840 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"454ee6da-70e5-4d30-89e5-19a35123a278\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7be4af5975c1ad5d347b761c03e870cfdbd3b774
e45f13f59fc0af4bedb3772c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T14:16:47Z\\\",\\\"message\\\":\\\":{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.204:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {78f6184b-c7cf-436d-8cbb-4b31f8af75e8}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 14:16:47.941044 7069 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/iptables-alerter-4ln5h in node crc\\\\nI1126 14:16:47.941055 7069 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/iptables-alerter-4ln5h after 0 failed attempt(s)\\\\nI1126 14:16:47.941061 7069 default_network_controller.go:776] Recording success event on pod openshift-network-operator/iptables-alerter-4ln5h\\\\nF1126 14:16:47.941099 7069 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error oc\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T14:16:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mhgm2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fdhhj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:54Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.078707 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd1172bb-f8ba-452e-9438-1e4e064466fb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b728895e4ecd26bbe5587512878f5dfb72643d07acc38dccecdf55d9369d1811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75e4e0ccd9d317e18bd7f97c06cdc5d2bcb53c2de228f3619c894d964304770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30b88b73e6299d048160f3e7b1698df43e63aa1dc98e86f8472bc47994019f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://862c88c8d26bb3f4d41d277c4df81162f98e6f27a1e191d4fe45c2c29eef6612\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T14:15:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:54Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.124128 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2e7efb219e45d8ae5c49bd0dfaa921f6c02e4646ea234df0f8b1a3f50adab58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:54Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.148517 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8tjq6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f4a637d-4b3f-4289-a84c-cd2559430a0e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad15902c02983c178ab3ce11a5103fa144f6dd39fd78aa6243bf9babd10861e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mjhs8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8tjq6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:54Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.151556 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.151594 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.151605 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.151621 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.151632 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:54Z","lastTransitionTime":"2025-11-26T14:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.165251 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7bxxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd349c4b-e265-4484-ab92-b4328ebde7fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7d3a1479d3bf371e9b7b4bc4c57843ede4b11d782b732245781ec31e0da71f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrqz9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7bxxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:54Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.177333 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-wjch9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b18a6f09-7a1e-4965-81e2-dde847147b41\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zp6hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-wjch9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:54Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.190322 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1755f26d-9772-47cd-9336-8c3e94febe60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af730a97f25a795f2f5f5a9b59a3c72868fd1d8f16a451fed1f7ce947779786e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10f95ff9c8fe951bea68ca3932581ecdcb55eee4f45bd79eeeb314fbd67ee80\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://222073fcbe74545f98ff4e8e05ced7ddc2e23933edff2e2135da7fbc33cfac57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:15:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:15:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:54Z is after 2025-08-24T17:21:41Z" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.207940 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T14:15:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:54Z is after 2025-08-24T17:21:41Z"
Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.219636 5037 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e677a13-ab89-4820-868f-ad848e66e4b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T14:16:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966a02276865593c5e0d10cb8b03dcfa5da44a3f1fe26a29d17c28c868157eb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1f5ee9860f5858603e973bab92f7eb597b2343eaadeda4e8c58ae962d61223\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T14:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j9kgj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T14:16:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cdzgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:16:54Z is after 2025-08-24T17:21:41Z"
Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.254278 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.254354 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.254370 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.254389 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.254402 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:54Z","lastTransitionTime":"2025-11-26T14:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.357606 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.357650 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.357670 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.357696 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.357715 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:54Z","lastTransitionTime":"2025-11-26T14:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.460546 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.460616 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.460679 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.460712 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.460730 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:54Z","lastTransitionTime":"2025-11-26T14:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.563768 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.563848 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.563861 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.563881 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.563894 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:54Z","lastTransitionTime":"2025-11-26T14:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.666972 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.667042 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.667066 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.667101 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.667124 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:54Z","lastTransitionTime":"2025-11-26T14:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.770345 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.770409 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.770424 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.770449 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.770464 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:54Z","lastTransitionTime":"2025-11-26T14:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.874851 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.874923 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.874940 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.874968 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.874988 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:54Z","lastTransitionTime":"2025-11-26T14:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.907220 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:54 crc kubenswrapper[5037]: E1126 14:16:54.907467 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.978935 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.979007 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.979032 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.979064 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:54 crc kubenswrapper[5037]: I1126 14:16:54.979090 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:54Z","lastTransitionTime":"2025-11-26T14:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.082567 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.082672 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.082704 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.082735 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.082757 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:55Z","lastTransitionTime":"2025-11-26T14:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.187082 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.187279 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.187367 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.187403 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.187427 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:55Z","lastTransitionTime":"2025-11-26T14:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.291116 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.291162 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.291177 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.291193 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.291205 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:55Z","lastTransitionTime":"2025-11-26T14:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.395162 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.395218 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.395236 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.395260 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.395279 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:55Z","lastTransitionTime":"2025-11-26T14:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.497921 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.497967 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.497976 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.497991 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.498001 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:55Z","lastTransitionTime":"2025-11-26T14:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.601651 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.601730 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.601757 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.601786 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.601808 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:55Z","lastTransitionTime":"2025-11-26T14:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.704905 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.704966 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.704978 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.705000 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.705016 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:55Z","lastTransitionTime":"2025-11-26T14:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.807531 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.807582 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.807597 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.807617 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.807631 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:55Z","lastTransitionTime":"2025-11-26T14:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.908243 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.908242 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:55 crc kubenswrapper[5037]: E1126 14:16:55.908435 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.908235 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:55 crc kubenswrapper[5037]: E1126 14:16:55.908586 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:55 crc kubenswrapper[5037]: E1126 14:16:55.908777 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.910404 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.910463 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.910486 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.910517 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:55 crc kubenswrapper[5037]: I1126 14:16:55.910539 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:55Z","lastTransitionTime":"2025-11-26T14:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.014694 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.014754 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.014775 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.014804 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.014825 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:56Z","lastTransitionTime":"2025-11-26T14:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.119952 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.120007 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.120021 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.120044 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.120061 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:56Z","lastTransitionTime":"2025-11-26T14:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.223093 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.223159 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.223172 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.223195 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.223209 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:56Z","lastTransitionTime":"2025-11-26T14:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.326677 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.326741 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.326758 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.326783 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.326799 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:56Z","lastTransitionTime":"2025-11-26T14:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.429751 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.429824 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.429843 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.429873 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.429890 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:56Z","lastTransitionTime":"2025-11-26T14:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.533452 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.533518 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.533536 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.533560 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.533578 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:56Z","lastTransitionTime":"2025-11-26T14:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.637966 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.638030 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.638049 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.638081 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.638104 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:56Z","lastTransitionTime":"2025-11-26T14:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.741416 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.741488 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.741505 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.741533 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.741552 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:56Z","lastTransitionTime":"2025-11-26T14:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.844223 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.844315 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.844337 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.844366 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.844387 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:56Z","lastTransitionTime":"2025-11-26T14:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.907251 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:56 crc kubenswrapper[5037]: E1126 14:16:56.907446 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.947345 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.947406 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.947452 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.947478 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:56 crc kubenswrapper[5037]: I1126 14:16:56.947494 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:56Z","lastTransitionTime":"2025-11-26T14:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.050463 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.050529 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.050545 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.050576 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.050597 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:57Z","lastTransitionTime":"2025-11-26T14:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.154520 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.154590 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.154608 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.154634 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.154654 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:57Z","lastTransitionTime":"2025-11-26T14:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.257831 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.257905 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.257929 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.257954 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.257971 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:57Z","lastTransitionTime":"2025-11-26T14:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.363851 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.363924 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.363939 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.363963 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.363983 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:57Z","lastTransitionTime":"2025-11-26T14:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.468585 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.468673 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.468700 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.468734 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.468757 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:57Z","lastTransitionTime":"2025-11-26T14:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.572471 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.572536 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.572554 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.572581 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.572600 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:57Z","lastTransitionTime":"2025-11-26T14:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.676200 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.676273 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.676328 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.676358 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.676379 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:57Z","lastTransitionTime":"2025-11-26T14:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.779210 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.779263 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.779276 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.779314 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.779329 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:57Z","lastTransitionTime":"2025-11-26T14:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.853792 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.853958 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.853995 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.854025 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.854059 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.854150 5037 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.854125905 +0000 UTC m=+148.650896089 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.854168 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.854206 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.854224 5037 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.854242 5037 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.854335 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.854314259 +0000 UTC m=+148.651084443 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.854395 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.85436221 +0000 UTC m=+148.651132434 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.854426 5037 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.854600 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.854564815 +0000 UTC m=+148.651335199 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.854437 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.854631 5037 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.854642 5037 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.854676 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.854667288 +0000 UTC m=+148.651437592 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.883551 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.883675 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.883693 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.883751 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.883767 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:57Z","lastTransitionTime":"2025-11-26T14:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.907878 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.907901 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.907876 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.908017 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.908189 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:57 crc kubenswrapper[5037]: E1126 14:16:57.908456 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.987096 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.987253 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.987276 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.987337 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:57 crc kubenswrapper[5037]: I1126 14:16:57.987364 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:57Z","lastTransitionTime":"2025-11-26T14:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.091245 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.091372 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.091396 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.091426 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.091449 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:58Z","lastTransitionTime":"2025-11-26T14:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.194909 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.195020 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.195049 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.195099 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.195122 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:58Z","lastTransitionTime":"2025-11-26T14:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.298199 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.298254 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.298262 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.298280 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.298305 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:58Z","lastTransitionTime":"2025-11-26T14:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.401756 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.401815 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.401826 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.401842 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.401851 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:58Z","lastTransitionTime":"2025-11-26T14:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.505833 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.505895 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.505912 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.505936 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.505954 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:58Z","lastTransitionTime":"2025-11-26T14:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.609495 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.609546 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.609561 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.609579 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.609590 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:58Z","lastTransitionTime":"2025-11-26T14:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.712901 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.712958 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.712987 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.713021 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.713113 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:58Z","lastTransitionTime":"2025-11-26T14:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.816732 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.816829 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.816847 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.816878 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.816896 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:58Z","lastTransitionTime":"2025-11-26T14:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.907225 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:16:58 crc kubenswrapper[5037]: E1126 14:16:58.907605 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.920757 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.920804 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.920819 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.920840 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:58 crc kubenswrapper[5037]: I1126 14:16:58.920854 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:58Z","lastTransitionTime":"2025-11-26T14:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.024230 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.024281 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.024314 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.024334 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.024348 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:59Z","lastTransitionTime":"2025-11-26T14:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.127122 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.127181 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.127200 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.127227 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.127245 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:59Z","lastTransitionTime":"2025-11-26T14:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.230088 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.230133 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.230146 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.230165 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.230178 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:59Z","lastTransitionTime":"2025-11-26T14:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.334067 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.334133 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.334173 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.334212 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.334236 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:59Z","lastTransitionTime":"2025-11-26T14:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.437953 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.438075 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.438103 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.438143 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.438168 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:59Z","lastTransitionTime":"2025-11-26T14:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.541545 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.541603 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.541622 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.541644 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.541660 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:59Z","lastTransitionTime":"2025-11-26T14:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.644583 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.644625 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.644635 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.644654 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.644667 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:59Z","lastTransitionTime":"2025-11-26T14:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.748166 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.748323 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.748342 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.748369 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.748390 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:59Z","lastTransitionTime":"2025-11-26T14:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.851602 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.851642 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.851653 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.851671 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.851681 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:59Z","lastTransitionTime":"2025-11-26T14:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.907598 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.907679 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.907713 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:16:59 crc kubenswrapper[5037]: E1126 14:16:59.907763 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:16:59 crc kubenswrapper[5037]: E1126 14:16:59.908057 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:16:59 crc kubenswrapper[5037]: E1126 14:16:59.908148 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.955184 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.955247 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.955267 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.955324 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:16:59 crc kubenswrapper[5037]: I1126 14:16:59.955343 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:16:59Z","lastTransitionTime":"2025-11-26T14:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.059255 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.059358 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.059375 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.059401 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.059417 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:00Z","lastTransitionTime":"2025-11-26T14:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.162264 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.162383 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.162404 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.162436 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.162457 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:00Z","lastTransitionTime":"2025-11-26T14:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.265434 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.265510 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.265531 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.265563 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.265588 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:00Z","lastTransitionTime":"2025-11-26T14:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.369074 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.369157 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.369183 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.369254 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.369275 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:00Z","lastTransitionTime":"2025-11-26T14:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.472731 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.472783 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.472800 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.472822 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.472837 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:00Z","lastTransitionTime":"2025-11-26T14:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.576487 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.576554 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.576573 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.576598 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.576616 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:00Z","lastTransitionTime":"2025-11-26T14:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.679880 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.679950 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.679968 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.680024 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.680046 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:00Z","lastTransitionTime":"2025-11-26T14:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.783926 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.784020 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.784048 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.784084 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.784109 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:00Z","lastTransitionTime":"2025-11-26T14:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.887555 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.888128 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.888143 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.888161 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.888176 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:00Z","lastTransitionTime":"2025-11-26T14:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.907472 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:00 crc kubenswrapper[5037]: E1126 14:17:00.907731 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.991692 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.991796 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.991813 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.991845 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:00 crc kubenswrapper[5037]: I1126 14:17:00.991863 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:00Z","lastTransitionTime":"2025-11-26T14:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.095565 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.095634 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.095657 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.095685 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.095703 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:01Z","lastTransitionTime":"2025-11-26T14:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.198183 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.198237 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.198250 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.198281 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.198325 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:01Z","lastTransitionTime":"2025-11-26T14:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.301689 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.301763 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.301784 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.301813 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.301832 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:01Z","lastTransitionTime":"2025-11-26T14:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.404934 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.405002 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.405021 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.405059 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.405085 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:01Z","lastTransitionTime":"2025-11-26T14:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.508356 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.508412 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.508431 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.508456 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.508474 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:01Z","lastTransitionTime":"2025-11-26T14:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.612142 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.612215 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.612233 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.612274 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.612391 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:01Z","lastTransitionTime":"2025-11-26T14:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.714990 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.715089 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.715112 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.715135 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.715149 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:01Z","lastTransitionTime":"2025-11-26T14:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.818609 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.818649 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.818671 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.818697 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.818714 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:01Z","lastTransitionTime":"2025-11-26T14:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.908008 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.908128 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:01 crc kubenswrapper[5037]: E1126 14:17:01.908211 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.908230 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:01 crc kubenswrapper[5037]: E1126 14:17:01.908406 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:01 crc kubenswrapper[5037]: E1126 14:17:01.908558 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.909694 5037 scope.go:117] "RemoveContainer" containerID="7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c" Nov 26 14:17:01 crc kubenswrapper[5037]: E1126 14:17:01.909966 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.923499 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.923572 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.923589 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.923613 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:01 crc kubenswrapper[5037]: I1126 14:17:01.923632 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:01Z","lastTransitionTime":"2025-11-26T14:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.026908 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.026970 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.026991 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.027021 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.027044 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.131047 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.131120 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.131139 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.131170 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.131190 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.152976 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.153114 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.153133 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.153194 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.153212 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: E1126 14:17:02.172123 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:17:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.177594 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.177640 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.177652 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.177671 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.177684 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: E1126 14:17:02.197484 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:17:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.203355 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.203408 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.203424 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.203450 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.203466 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: E1126 14:17:02.220514 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:17:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.225004 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.225077 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.225095 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.225120 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.225137 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: E1126 14:17:02.240274 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:17:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.244873 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.244913 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.244924 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.244942 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.244954 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: E1126 14:17:02.260024 5037 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T14:17:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b247aecb-f60a-4360-9d1b-a1f9057dc4ca\\\",\\\"systemUUID\\\":\\\"4d169cbc-8c3f-42b1-afc1-3f5b57e5ed06\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T14:17:02Z is after 2025-08-24T17:21:41Z" Nov 26 14:17:02 crc kubenswrapper[5037]: E1126 14:17:02.260217 5037 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.262480 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.262536 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.262555 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.262580 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.262597 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.366430 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.366536 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.366560 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.366639 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.366703 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.469540 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.469591 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.469604 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.469623 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.469636 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.571749 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.571800 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.571811 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.571831 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.571844 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.675253 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.675337 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.675349 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.675365 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.675420 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.777564 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.777598 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.777607 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.777621 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.777633 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.880823 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.880887 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.880899 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.880917 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.880928 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.907536 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:02 crc kubenswrapper[5037]: E1126 14:17:02.908152 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.926208 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.984256 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.984364 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.984390 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.984423 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:02 crc kubenswrapper[5037]: I1126 14:17:02.984448 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:02Z","lastTransitionTime":"2025-11-26T14:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.087889 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.087922 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.087930 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.087944 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.087955 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:03Z","lastTransitionTime":"2025-11-26T14:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.191081 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.191137 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.191147 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.191167 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.191178 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:03Z","lastTransitionTime":"2025-11-26T14:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.294689 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.294730 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.294739 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.294755 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.294764 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:03Z","lastTransitionTime":"2025-11-26T14:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.398779 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.398841 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.398860 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.398892 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.398912 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:03Z","lastTransitionTime":"2025-11-26T14:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.502129 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.502170 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.502179 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.502197 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.502207 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:03Z","lastTransitionTime":"2025-11-26T14:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.610699 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.610813 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.610853 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.610885 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.610906 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:03Z","lastTransitionTime":"2025-11-26T14:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.714841 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.714924 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.714943 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.714969 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.714985 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:03Z","lastTransitionTime":"2025-11-26T14:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.818832 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.818911 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.818935 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.818967 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.818989 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:03Z","lastTransitionTime":"2025-11-26T14:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.907390 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.907491 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.908609 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:03 crc kubenswrapper[5037]: E1126 14:17:03.908720 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:03 crc kubenswrapper[5037]: E1126 14:17:03.908849 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:03 crc kubenswrapper[5037]: E1126 14:17:03.908888 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.922925 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.922962 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.922973 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.922994 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.923007 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:03Z","lastTransitionTime":"2025-11-26T14:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:03 crc kubenswrapper[5037]: I1126 14:17:03.964691 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=69.964658499 podStartE2EDuration="1m9.964658499s" podCreationTimestamp="2025-11-26 14:15:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:03.941109748 +0000 UTC m=+90.737879942" watchObservedRunningTime="2025-11-26 14:17:03.964658499 +0000 UTC m=+90.761428723" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.002214 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podStartSLOduration=69.002183626 podStartE2EDuration="1m9.002183626s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:04.001827146 +0000 UTC m=+90.798597360" watchObservedRunningTime="2025-11-26 14:17:04.002183626 +0000 UTC m=+90.798953840" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.026469 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.026508 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.026522 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.026541 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.026555 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:04Z","lastTransitionTime":"2025-11-26T14:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.058645 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=39.058622259 podStartE2EDuration="39.058622259s" podCreationTimestamp="2025-11-26 14:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:04.058385493 +0000 UTC m=+90.855155697" watchObservedRunningTime="2025-11-26 14:17:04.058622259 +0000 UTC m=+90.855392443" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.098026 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-7bxxg" podStartSLOduration=70.097996931 podStartE2EDuration="1m10.097996931s" podCreationTimestamp="2025-11-26 14:15:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:04.097834347 +0000 UTC m=+90.894604531" watchObservedRunningTime="2025-11-26 14:17:04.097996931 +0000 UTC m=+90.894767115" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.098413 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-8tjq6" podStartSLOduration=70.0984081 podStartE2EDuration="1m10.0984081s" podCreationTimestamp="2025-11-26 14:15:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:04.088402394 +0000 UTC m=+90.885172578" watchObservedRunningTime="2025-11-26 14:17:04.0984081 +0000 UTC m=+90.895178284" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.123974 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=2.123957572 podStartE2EDuration="2.123957572s" podCreationTimestamp="2025-11-26 14:17:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:04.123850549 +0000 UTC m=+90.920620733" watchObservedRunningTime="2025-11-26 14:17:04.123957572 +0000 UTC m=+90.920727756" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.128771 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.128799 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.128809 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.128824 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.128835 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:04Z","lastTransitionTime":"2025-11-26T14:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.173261 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cdzgw" podStartSLOduration=69.173238638 podStartE2EDuration="1m9.173238638s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:04.152896106 +0000 UTC m=+90.949666310" watchObservedRunningTime="2025-11-26 14:17:04.173238638 +0000 UTC m=+90.970008822" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.187107 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=68.1870846 podStartE2EDuration="1m8.1870846s" podCreationTimestamp="2025-11-26 14:15:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:04.173897974 +0000 UTC m=+90.970668158" watchObservedRunningTime="2025-11-26 14:17:04.1870846 +0000 UTC m=+90.983854784" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.214936 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-hn6x5" podStartSLOduration=69.214919397 podStartE2EDuration="1m9.214919397s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:04.208930909 +0000 UTC m=+91.005701103" watchObservedRunningTime="2025-11-26 14:17:04.214919397 +0000 UTC m=+91.011689591" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.230776 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.230809 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.230818 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.230832 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.230842 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:04Z","lastTransitionTime":"2025-11-26T14:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.252748 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-lxpjp" podStartSLOduration=69.252733021 podStartE2EDuration="1m9.252733021s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:04.2377116 +0000 UTC m=+91.034481784" watchObservedRunningTime="2025-11-26 14:17:04.252733021 +0000 UTC m=+91.049503205" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.339127 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.339193 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.339205 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.339225 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.339237 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:04Z","lastTransitionTime":"2025-11-26T14:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.442373 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.442419 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.442431 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.442447 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.442460 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:04Z","lastTransitionTime":"2025-11-26T14:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.545637 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.545685 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.545697 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.545714 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.545724 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:04Z","lastTransitionTime":"2025-11-26T14:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.648959 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.649014 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.649037 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.649066 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.649088 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:04Z","lastTransitionTime":"2025-11-26T14:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.752518 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.752590 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.752609 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.752631 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.752646 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:04Z","lastTransitionTime":"2025-11-26T14:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.856132 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.856196 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.856216 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.856240 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.856258 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:04Z","lastTransitionTime":"2025-11-26T14:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.907998 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:04 crc kubenswrapper[5037]: E1126 14:17:04.908210 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.960667 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.960740 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.960786 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.960815 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:04 crc kubenswrapper[5037]: I1126 14:17:04.960834 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:04Z","lastTransitionTime":"2025-11-26T14:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.064139 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.064219 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.064242 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.064282 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.064357 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:05Z","lastTransitionTime":"2025-11-26T14:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.167546 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.167593 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.167604 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.167623 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.167636 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:05Z","lastTransitionTime":"2025-11-26T14:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.270925 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.270996 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.271015 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.271045 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.271065 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:05Z","lastTransitionTime":"2025-11-26T14:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.374362 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.374413 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.374425 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.374446 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.374459 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:05Z","lastTransitionTime":"2025-11-26T14:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.477162 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.477225 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.477238 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.477265 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.477279 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:05Z","lastTransitionTime":"2025-11-26T14:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.580943 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.580996 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.581009 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.581028 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.581043 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:05Z","lastTransitionTime":"2025-11-26T14:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.684491 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.684550 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.684561 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.684582 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.684594 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:05Z","lastTransitionTime":"2025-11-26T14:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.788059 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.788596 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.788820 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.789040 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.789544 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:05Z","lastTransitionTime":"2025-11-26T14:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.893259 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.893367 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.893386 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.893413 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.893431 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:05Z","lastTransitionTime":"2025-11-26T14:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.907980 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.908018 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.908100 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:05 crc kubenswrapper[5037]: E1126 14:17:05.908266 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:05 crc kubenswrapper[5037]: E1126 14:17:05.908753 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:05 crc kubenswrapper[5037]: E1126 14:17:05.909055 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.996499 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.996541 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.996561 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.996580 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:05 crc kubenswrapper[5037]: I1126 14:17:05.996595 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:05Z","lastTransitionTime":"2025-11-26T14:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.098975 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.099051 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.099069 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.099096 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.099117 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:06Z","lastTransitionTime":"2025-11-26T14:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.202798 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.202854 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.202867 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.202893 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.202907 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:06Z","lastTransitionTime":"2025-11-26T14:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.305749 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.305810 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.305826 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.305849 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.305865 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:06Z","lastTransitionTime":"2025-11-26T14:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.410839 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.410913 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.410929 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.410956 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.410981 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:06Z","lastTransitionTime":"2025-11-26T14:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.515007 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.515056 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.515065 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.515083 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.515093 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:06Z","lastTransitionTime":"2025-11-26T14:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.618930 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.619512 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.619716 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.619959 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.620204 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:06Z","lastTransitionTime":"2025-11-26T14:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.724211 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.724270 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.724320 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.724383 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.724402 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:06Z","lastTransitionTime":"2025-11-26T14:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.828516 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.828596 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.828620 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.828651 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.828673 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:06Z","lastTransitionTime":"2025-11-26T14:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.907486 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:06 crc kubenswrapper[5037]: E1126 14:17:06.907695 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.933359 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.933418 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.933432 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.933450 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:06 crc kubenswrapper[5037]: I1126 14:17:06.933462 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:06Z","lastTransitionTime":"2025-11-26T14:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.037071 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.037111 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.037119 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.037136 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.037148 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:07Z","lastTransitionTime":"2025-11-26T14:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.140713 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.140764 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.140776 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.140792 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.140805 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:07Z","lastTransitionTime":"2025-11-26T14:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.243344 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.243420 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.243437 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.243464 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.243485 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:07Z","lastTransitionTime":"2025-11-26T14:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.347263 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.347338 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.347351 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.347376 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.347392 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:07Z","lastTransitionTime":"2025-11-26T14:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.450580 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.450621 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.450654 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.450677 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.450690 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:07Z","lastTransitionTime":"2025-11-26T14:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.554836 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.554905 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.554957 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.554981 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.554993 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:07Z","lastTransitionTime":"2025-11-26T14:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.659107 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.659189 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.659202 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.659222 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.659236 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:07Z","lastTransitionTime":"2025-11-26T14:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.762944 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.763016 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.763035 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.763063 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.763080 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:07Z","lastTransitionTime":"2025-11-26T14:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.866836 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.866898 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.866916 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.866939 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.866956 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:07Z","lastTransitionTime":"2025-11-26T14:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.907792 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.907873 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.908040 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:07 crc kubenswrapper[5037]: E1126 14:17:07.908239 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:07 crc kubenswrapper[5037]: E1126 14:17:07.908426 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:07 crc kubenswrapper[5037]: E1126 14:17:07.908616 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.970162 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.970223 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.970240 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.970267 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:07 crc kubenswrapper[5037]: I1126 14:17:07.970324 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:07Z","lastTransitionTime":"2025-11-26T14:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.073418 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.073464 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.073474 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.073489 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.073501 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:08Z","lastTransitionTime":"2025-11-26T14:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.176426 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.176576 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.176594 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.176618 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.176637 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:08Z","lastTransitionTime":"2025-11-26T14:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.279352 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.279419 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.279432 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.279449 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.279459 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:08Z","lastTransitionTime":"2025-11-26T14:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.383756 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.383833 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.383850 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.383875 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.383892 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:08Z","lastTransitionTime":"2025-11-26T14:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.486058 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.486115 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.486127 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.486146 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.486156 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:08Z","lastTransitionTime":"2025-11-26T14:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.588636 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.588690 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.588701 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.588720 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.588732 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:08Z","lastTransitionTime":"2025-11-26T14:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.691686 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.691727 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.691735 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.691750 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.691761 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:08Z","lastTransitionTime":"2025-11-26T14:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.795652 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.795692 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.795703 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.795732 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.795746 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:08Z","lastTransitionTime":"2025-11-26T14:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.898412 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.898479 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.898513 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.898550 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.898569 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:08Z","lastTransitionTime":"2025-11-26T14:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:08 crc kubenswrapper[5037]: I1126 14:17:08.908039 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:08 crc kubenswrapper[5037]: E1126 14:17:08.908232 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.000731 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.000792 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.000804 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.000829 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.000870 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:09Z","lastTransitionTime":"2025-11-26T14:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.104280 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.104368 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.104387 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.104413 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.104432 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:09Z","lastTransitionTime":"2025-11-26T14:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.207038 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.207099 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.207117 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.207148 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.207167 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:09Z","lastTransitionTime":"2025-11-26T14:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.310211 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.310267 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.310282 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.310322 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.310334 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:09Z","lastTransitionTime":"2025-11-26T14:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.414076 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.414123 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.414135 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.414157 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.414169 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:09Z","lastTransitionTime":"2025-11-26T14:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.517537 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.517597 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.517614 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.517635 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.517651 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:09Z","lastTransitionTime":"2025-11-26T14:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.620838 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.620892 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.620908 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.620929 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.620943 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:09Z","lastTransitionTime":"2025-11-26T14:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.724202 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.724267 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.724321 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.724349 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.724406 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:09Z","lastTransitionTime":"2025-11-26T14:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.827919 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.827986 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.828008 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.828042 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.828068 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:09Z","lastTransitionTime":"2025-11-26T14:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.907418 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.907418 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:09 crc kubenswrapper[5037]: E1126 14:17:09.907632 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.907451 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:09 crc kubenswrapper[5037]: E1126 14:17:09.907880 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:09 crc kubenswrapper[5037]: E1126 14:17:09.907851 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.931172 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.931215 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.931226 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.931247 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:09 crc kubenswrapper[5037]: I1126 14:17:09.931261 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:09Z","lastTransitionTime":"2025-11-26T14:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.035685 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.035750 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.035768 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.035794 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.035809 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:10Z","lastTransitionTime":"2025-11-26T14:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.139832 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.139877 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.139890 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.139927 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.139939 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:10Z","lastTransitionTime":"2025-11-26T14:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.243830 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.243880 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.243893 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.243913 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.243924 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:10Z","lastTransitionTime":"2025-11-26T14:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.346862 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.346916 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.346934 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.346957 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.346971 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:10Z","lastTransitionTime":"2025-11-26T14:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.449676 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.449719 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.449733 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.449754 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.449766 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:10Z","lastTransitionTime":"2025-11-26T14:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.552949 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.552999 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.553009 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.553029 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.553385 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:10Z","lastTransitionTime":"2025-11-26T14:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.656548 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.656616 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.656639 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.656669 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.656690 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:10Z","lastTransitionTime":"2025-11-26T14:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.759546 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.759612 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.759628 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.759653 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.759672 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:10Z","lastTransitionTime":"2025-11-26T14:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.862216 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.862361 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.862381 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.862452 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.862470 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:10Z","lastTransitionTime":"2025-11-26T14:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.907116 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:10 crc kubenswrapper[5037]: E1126 14:17:10.907263 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.966885 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.966938 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.966958 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.966983 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:10 crc kubenswrapper[5037]: I1126 14:17:10.967149 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:10Z","lastTransitionTime":"2025-11-26T14:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.069928 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.070000 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.070023 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.070052 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.070078 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:11Z","lastTransitionTime":"2025-11-26T14:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.173476 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.173546 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.173585 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.173619 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.173641 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:11Z","lastTransitionTime":"2025-11-26T14:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.276740 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.276800 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.276817 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.276843 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.276862 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:11Z","lastTransitionTime":"2025-11-26T14:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.380512 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.380563 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.380574 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.380591 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.380604 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:11Z","lastTransitionTime":"2025-11-26T14:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.483192 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.483267 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.483318 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.483350 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.483368 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:11Z","lastTransitionTime":"2025-11-26T14:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.587558 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.587644 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.587668 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.587700 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.587719 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:11Z","lastTransitionTime":"2025-11-26T14:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.691842 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.691916 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.691937 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.691968 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.691992 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:11Z","lastTransitionTime":"2025-11-26T14:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.794853 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.794932 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.794954 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.794986 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.795009 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:11Z","lastTransitionTime":"2025-11-26T14:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.898552 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.898621 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.898639 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.898662 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.898680 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:11Z","lastTransitionTime":"2025-11-26T14:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.908029 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.908131 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:11 crc kubenswrapper[5037]: I1126 14:17:11.908056 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:11 crc kubenswrapper[5037]: E1126 14:17:11.908240 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:11 crc kubenswrapper[5037]: E1126 14:17:11.908462 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:11 crc kubenswrapper[5037]: E1126 14:17:11.908621 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.001889 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.001993 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.002012 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.002038 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.002055 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:12Z","lastTransitionTime":"2025-11-26T14:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.106144 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.106220 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.106234 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.106258 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.106273 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:12Z","lastTransitionTime":"2025-11-26T14:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.209866 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.209929 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.209950 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.209983 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.210007 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:12Z","lastTransitionTime":"2025-11-26T14:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.313097 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.313168 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.313192 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.313222 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.313243 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:12Z","lastTransitionTime":"2025-11-26T14:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.416374 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.416449 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.416468 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.416496 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.416520 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:12Z","lastTransitionTime":"2025-11-26T14:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.520091 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.520152 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.520165 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.520196 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.520214 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:12Z","lastTransitionTime":"2025-11-26T14:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.624173 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.624255 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.624270 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.624347 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.624368 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:12Z","lastTransitionTime":"2025-11-26T14:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.640934 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.640991 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.641009 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.641038 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.641057 5037 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T14:17:12Z","lastTransitionTime":"2025-11-26T14:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.709501 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p"] Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.710069 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.712484 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.713411 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.713893 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.714423 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.841223 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f3929fd2-9638-465a-96b4-a438e98a3661-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.841355 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3929fd2-9638-465a-96b4-a438e98a3661-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.841399 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f3929fd2-9638-465a-96b4-a438e98a3661-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.841525 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f3929fd2-9638-465a-96b4-a438e98a3661-service-ca\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.841575 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f3929fd2-9638-465a-96b4-a438e98a3661-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.907895 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:12 crc kubenswrapper[5037]: E1126 14:17:12.908049 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.942767 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f3929fd2-9638-465a-96b4-a438e98a3661-service-ca\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.942812 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f3929fd2-9638-465a-96b4-a438e98a3661-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.942864 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f3929fd2-9638-465a-96b4-a438e98a3661-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.942896 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3929fd2-9638-465a-96b4-a438e98a3661-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.942938 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f3929fd2-9638-465a-96b4-a438e98a3661-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.943034 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f3929fd2-9638-465a-96b4-a438e98a3661-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.943093 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f3929fd2-9638-465a-96b4-a438e98a3661-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" 
Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.944347 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f3929fd2-9638-465a-96b4-a438e98a3661-service-ca\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.953339 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3929fd2-9638-465a-96b4-a438e98a3661-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:12 crc kubenswrapper[5037]: I1126 14:17:12.961917 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f3929fd2-9638-465a-96b4-a438e98a3661-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-2wg8p\" (UID: \"f3929fd2-9638-465a-96b4-a438e98a3661\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:13 crc kubenswrapper[5037]: I1126 14:17:13.034778 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" Nov 26 14:17:13 crc kubenswrapper[5037]: I1126 14:17:13.245948 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:13 crc kubenswrapper[5037]: E1126 14:17:13.246200 5037 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 14:17:13 crc kubenswrapper[5037]: E1126 14:17:13.246687 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs podName:b18a6f09-7a1e-4965-81e2-dde847147b41 nodeName:}" failed. No retries permitted until 2025-11-26 14:18:17.246660187 +0000 UTC m=+164.043430401 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs") pod "network-metrics-daemon-wjch9" (UID: "b18a6f09-7a1e-4965-81e2-dde847147b41") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 14:17:13 crc kubenswrapper[5037]: I1126 14:17:13.604719 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" event={"ID":"f3929fd2-9638-465a-96b4-a438e98a3661","Type":"ContainerStarted","Data":"5870393a834be0452197782ed9af2d5a9e476eea73be38a9a44dc8463d71d2f6"} Nov 26 14:17:13 crc kubenswrapper[5037]: I1126 14:17:13.604815 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" event={"ID":"f3929fd2-9638-465a-96b4-a438e98a3661","Type":"ContainerStarted","Data":"efc6a23135fe06b7141a9f091c998b30e1ca7ab3186a7e79714f7e52878e7000"} Nov 26 14:17:13 crc kubenswrapper[5037]: I1126 14:17:13.632065 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2wg8p" podStartSLOduration=78.63203453 podStartE2EDuration="1m18.63203453s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:13.630935844 +0000 UTC m=+100.427706088" watchObservedRunningTime="2025-11-26 14:17:13.63203453 +0000 UTC m=+100.428804744" Nov 26 14:17:13 crc kubenswrapper[5037]: I1126 14:17:13.907817 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:13 crc kubenswrapper[5037]: I1126 14:17:13.907867 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:13 crc kubenswrapper[5037]: I1126 14:17:13.907746 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:13 crc kubenswrapper[5037]: E1126 14:17:13.909179 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:13 crc kubenswrapper[5037]: E1126 14:17:13.909334 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:13 crc kubenswrapper[5037]: E1126 14:17:13.909529 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:14 crc kubenswrapper[5037]: I1126 14:17:14.907504 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:14 crc kubenswrapper[5037]: E1126 14:17:14.907706 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:15 crc kubenswrapper[5037]: I1126 14:17:15.908097 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:15 crc kubenswrapper[5037]: I1126 14:17:15.909415 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:15 crc kubenswrapper[5037]: I1126 14:17:15.909737 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:15 crc kubenswrapper[5037]: E1126 14:17:15.909921 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:15 crc kubenswrapper[5037]: I1126 14:17:15.910130 5037 scope.go:117] "RemoveContainer" containerID="7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c" Nov 26 14:17:15 crc kubenswrapper[5037]: E1126 14:17:15.910320 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:15 crc kubenswrapper[5037]: E1126 14:17:15.910343 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" Nov 26 14:17:15 crc kubenswrapper[5037]: E1126 14:17:15.910441 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:16 crc kubenswrapper[5037]: I1126 14:17:16.907512 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:16 crc kubenswrapper[5037]: E1126 14:17:16.907658 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:16 crc kubenswrapper[5037]: I1126 14:17:16.930019 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 26 14:17:17 crc kubenswrapper[5037]: I1126 14:17:17.908048 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:17 crc kubenswrapper[5037]: I1126 14:17:17.908145 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:17 crc kubenswrapper[5037]: I1126 14:17:17.908033 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:17 crc kubenswrapper[5037]: E1126 14:17:17.908259 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:17 crc kubenswrapper[5037]: E1126 14:17:17.908470 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:17 crc kubenswrapper[5037]: E1126 14:17:17.908558 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:18 crc kubenswrapper[5037]: I1126 14:17:18.907311 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:18 crc kubenswrapper[5037]: E1126 14:17:18.907520 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:19 crc kubenswrapper[5037]: I1126 14:17:19.907442 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:19 crc kubenswrapper[5037]: I1126 14:17:19.907556 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:19 crc kubenswrapper[5037]: I1126 14:17:19.907443 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:19 crc kubenswrapper[5037]: E1126 14:17:19.907662 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:19 crc kubenswrapper[5037]: E1126 14:17:19.907769 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:19 crc kubenswrapper[5037]: E1126 14:17:19.907891 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:20 crc kubenswrapper[5037]: I1126 14:17:20.908028 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:20 crc kubenswrapper[5037]: E1126 14:17:20.908370 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:21 crc kubenswrapper[5037]: I1126 14:17:21.907648 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:21 crc kubenswrapper[5037]: I1126 14:17:21.907729 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:21 crc kubenswrapper[5037]: I1126 14:17:21.907648 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:21 crc kubenswrapper[5037]: E1126 14:17:21.907867 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:21 crc kubenswrapper[5037]: E1126 14:17:21.907966 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:21 crc kubenswrapper[5037]: E1126 14:17:21.908103 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:22 crc kubenswrapper[5037]: I1126 14:17:22.907251 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:22 crc kubenswrapper[5037]: E1126 14:17:22.907450 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:23 crc kubenswrapper[5037]: I1126 14:17:23.907143 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:23 crc kubenswrapper[5037]: I1126 14:17:23.907206 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:23 crc kubenswrapper[5037]: I1126 14:17:23.907280 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:23 crc kubenswrapper[5037]: E1126 14:17:23.908316 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:23 crc kubenswrapper[5037]: E1126 14:17:23.908702 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:23 crc kubenswrapper[5037]: E1126 14:17:23.908778 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:23 crc kubenswrapper[5037]: I1126 14:17:23.953238 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=7.953195629 podStartE2EDuration="7.953195629s" podCreationTimestamp="2025-11-26 14:17:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:23.949428146 +0000 UTC m=+110.746198350" watchObservedRunningTime="2025-11-26 14:17:23.953195629 +0000 UTC m=+110.749965833" Nov 26 14:17:24 crc kubenswrapper[5037]: I1126 14:17:24.907206 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:24 crc kubenswrapper[5037]: E1126 14:17:24.907450 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:25 crc kubenswrapper[5037]: I1126 14:17:25.907690 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:25 crc kubenswrapper[5037]: I1126 14:17:25.907975 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:25 crc kubenswrapper[5037]: E1126 14:17:25.908167 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:25 crc kubenswrapper[5037]: I1126 14:17:25.908213 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:25 crc kubenswrapper[5037]: E1126 14:17:25.908600 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:25 crc kubenswrapper[5037]: E1126 14:17:25.908457 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:26 crc kubenswrapper[5037]: I1126 14:17:26.907757 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:26 crc kubenswrapper[5037]: E1126 14:17:26.908832 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:26 crc kubenswrapper[5037]: I1126 14:17:26.910351 5037 scope.go:117] "RemoveContainer" containerID="7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c" Nov 26 14:17:26 crc kubenswrapper[5037]: E1126 14:17:26.910611 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fdhhj_openshift-ovn-kubernetes(454ee6da-70e5-4d30-89e5-19a35123a278)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" Nov 26 14:17:27 crc kubenswrapper[5037]: I1126 14:17:27.907547 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:27 crc kubenswrapper[5037]: I1126 14:17:27.907573 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:27 crc kubenswrapper[5037]: I1126 14:17:27.907694 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:27 crc kubenswrapper[5037]: E1126 14:17:27.907891 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:27 crc kubenswrapper[5037]: E1126 14:17:27.907991 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:27 crc kubenswrapper[5037]: E1126 14:17:27.908175 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:28 crc kubenswrapper[5037]: I1126 14:17:28.907324 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:28 crc kubenswrapper[5037]: E1126 14:17:28.907639 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:29 crc kubenswrapper[5037]: I1126 14:17:29.667420 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-lxpjp_490e7d88-ae7f-45f9-ab12-598c33e3bc69/kube-multus/1.log" Nov 26 14:17:29 crc kubenswrapper[5037]: I1126 14:17:29.668752 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-lxpjp_490e7d88-ae7f-45f9-ab12-598c33e3bc69/kube-multus/0.log" Nov 26 14:17:29 crc kubenswrapper[5037]: I1126 14:17:29.668827 5037 generic.go:334] "Generic (PLEG): container finished" podID="490e7d88-ae7f-45f9-ab12-598c33e3bc69" containerID="58232632cfc8ddcd9e524acaf4b195314aeed89c0c6f892596b6020a82de4d38" exitCode=1 Nov 26 14:17:29 crc kubenswrapper[5037]: I1126 14:17:29.668890 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-lxpjp" event={"ID":"490e7d88-ae7f-45f9-ab12-598c33e3bc69","Type":"ContainerDied","Data":"58232632cfc8ddcd9e524acaf4b195314aeed89c0c6f892596b6020a82de4d38"} Nov 26 14:17:29 crc kubenswrapper[5037]: I1126 14:17:29.668963 5037 scope.go:117] "RemoveContainer" containerID="a07f2a67126ad6d2a2c7a5a4684d33a54354a456e04246c2539f95fda950c5f2" Nov 26 14:17:29 crc kubenswrapper[5037]: I1126 14:17:29.669858 5037 scope.go:117] "RemoveContainer" containerID="58232632cfc8ddcd9e524acaf4b195314aeed89c0c6f892596b6020a82de4d38" Nov 26 14:17:29 crc kubenswrapper[5037]: E1126 14:17:29.670192 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-lxpjp_openshift-multus(490e7d88-ae7f-45f9-ab12-598c33e3bc69)\"" pod="openshift-multus/multus-lxpjp" podUID="490e7d88-ae7f-45f9-ab12-598c33e3bc69" Nov 26 14:17:29 crc kubenswrapper[5037]: I1126 14:17:29.907557 5037 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:29 crc kubenswrapper[5037]: I1126 14:17:29.907719 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:29 crc kubenswrapper[5037]: E1126 14:17:29.907911 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:29 crc kubenswrapper[5037]: I1126 14:17:29.907991 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:29 crc kubenswrapper[5037]: E1126 14:17:29.908165 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:29 crc kubenswrapper[5037]: E1126 14:17:29.908394 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:30 crc kubenswrapper[5037]: I1126 14:17:30.675958 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-lxpjp_490e7d88-ae7f-45f9-ab12-598c33e3bc69/kube-multus/1.log" Nov 26 14:17:30 crc kubenswrapper[5037]: I1126 14:17:30.908276 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:30 crc kubenswrapper[5037]: E1126 14:17:30.908601 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:31 crc kubenswrapper[5037]: I1126 14:17:31.908550 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:31 crc kubenswrapper[5037]: I1126 14:17:31.908652 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:31 crc kubenswrapper[5037]: E1126 14:17:31.908742 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:31 crc kubenswrapper[5037]: I1126 14:17:31.908843 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:31 crc kubenswrapper[5037]: E1126 14:17:31.909093 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:31 crc kubenswrapper[5037]: E1126 14:17:31.909264 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:32 crc kubenswrapper[5037]: I1126 14:17:32.913595 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:32 crc kubenswrapper[5037]: E1126 14:17:32.913762 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:33 crc kubenswrapper[5037]: E1126 14:17:33.877437 5037 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 26 14:17:33 crc kubenswrapper[5037]: I1126 14:17:33.907603 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:33 crc kubenswrapper[5037]: I1126 14:17:33.907601 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:33 crc kubenswrapper[5037]: I1126 14:17:33.907764 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:33 crc kubenswrapper[5037]: E1126 14:17:33.910040 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:33 crc kubenswrapper[5037]: E1126 14:17:33.910144 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:33 crc kubenswrapper[5037]: E1126 14:17:33.910246 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:34 crc kubenswrapper[5037]: E1126 14:17:34.204117 5037 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 14:17:34 crc kubenswrapper[5037]: I1126 14:17:34.907841 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:34 crc kubenswrapper[5037]: E1126 14:17:34.908107 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:35 crc kubenswrapper[5037]: I1126 14:17:35.907727 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:35 crc kubenswrapper[5037]: I1126 14:17:35.907746 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:35 crc kubenswrapper[5037]: E1126 14:17:35.908139 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:35 crc kubenswrapper[5037]: E1126 14:17:35.908436 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:35 crc kubenswrapper[5037]: I1126 14:17:35.908927 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:35 crc kubenswrapper[5037]: E1126 14:17:35.909137 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:36 crc kubenswrapper[5037]: I1126 14:17:36.908127 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:36 crc kubenswrapper[5037]: E1126 14:17:36.908369 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:37 crc kubenswrapper[5037]: I1126 14:17:37.907895 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:37 crc kubenswrapper[5037]: I1126 14:17:37.908001 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:37 crc kubenswrapper[5037]: E1126 14:17:37.908069 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:37 crc kubenswrapper[5037]: I1126 14:17:37.908352 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:37 crc kubenswrapper[5037]: E1126 14:17:37.908355 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:37 crc kubenswrapper[5037]: E1126 14:17:37.908438 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:38 crc kubenswrapper[5037]: I1126 14:17:38.907260 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:38 crc kubenswrapper[5037]: E1126 14:17:38.907535 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:39 crc kubenswrapper[5037]: E1126 14:17:39.205897 5037 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 14:17:39 crc kubenswrapper[5037]: I1126 14:17:39.907699 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:39 crc kubenswrapper[5037]: I1126 14:17:39.907802 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:39 crc kubenswrapper[5037]: I1126 14:17:39.907830 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:39 crc kubenswrapper[5037]: E1126 14:17:39.907935 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:39 crc kubenswrapper[5037]: E1126 14:17:39.908205 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:39 crc kubenswrapper[5037]: E1126 14:17:39.908462 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:40 crc kubenswrapper[5037]: I1126 14:17:40.907229 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:40 crc kubenswrapper[5037]: E1126 14:17:40.908230 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:41 crc kubenswrapper[5037]: I1126 14:17:41.907969 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:41 crc kubenswrapper[5037]: I1126 14:17:41.908516 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:41 crc kubenswrapper[5037]: I1126 14:17:41.908568 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:41 crc kubenswrapper[5037]: I1126 14:17:41.908644 5037 scope.go:117] "RemoveContainer" containerID="58232632cfc8ddcd9e524acaf4b195314aeed89c0c6f892596b6020a82de4d38" Nov 26 14:17:41 crc kubenswrapper[5037]: E1126 14:17:41.908731 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:41 crc kubenswrapper[5037]: E1126 14:17:41.909223 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:41 crc kubenswrapper[5037]: E1126 14:17:41.909273 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:41 crc kubenswrapper[5037]: I1126 14:17:41.909693 5037 scope.go:117] "RemoveContainer" containerID="7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c" Nov 26 14:17:42 crc kubenswrapper[5037]: I1126 14:17:42.725736 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/3.log" Nov 26 14:17:42 crc kubenswrapper[5037]: I1126 14:17:42.728870 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerStarted","Data":"34de66ca7844b823c4e7913ac3c3ba14d0b8652a0741d3072c1cd913f6f68d6c"} Nov 26 14:17:42 crc kubenswrapper[5037]: I1126 14:17:42.729404 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:17:42 crc kubenswrapper[5037]: I1126 14:17:42.730695 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-lxpjp_490e7d88-ae7f-45f9-ab12-598c33e3bc69/kube-multus/1.log" Nov 26 14:17:42 crc kubenswrapper[5037]: I1126 14:17:42.730740 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-lxpjp" event={"ID":"490e7d88-ae7f-45f9-ab12-598c33e3bc69","Type":"ContainerStarted","Data":"d00fe2156839598797b806c7acdd6afda48f3c21d5efc19c29a33e6605c33e2a"} Nov 26 14:17:42 crc kubenswrapper[5037]: I1126 14:17:42.778117 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podStartSLOduration=107.778092459 podStartE2EDuration="1m47.778092459s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:42.760912866 +0000 UTC m=+129.557683050" watchObservedRunningTime="2025-11-26 14:17:42.778092459 +0000 UTC m=+129.574862643" Nov 26 14:17:42 crc kubenswrapper[5037]: I1126 14:17:42.907430 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:42 crc kubenswrapper[5037]: E1126 14:17:42.907615 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:42 crc kubenswrapper[5037]: I1126 14:17:42.928123 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-wjch9"] Nov 26 14:17:43 crc kubenswrapper[5037]: I1126 14:17:43.734694 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:43 crc kubenswrapper[5037]: E1126 14:17:43.735465 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:43 crc kubenswrapper[5037]: I1126 14:17:43.907537 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:43 crc kubenswrapper[5037]: I1126 14:17:43.907593 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:43 crc kubenswrapper[5037]: I1126 14:17:43.907562 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:43 crc kubenswrapper[5037]: E1126 14:17:43.908997 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:43 crc kubenswrapper[5037]: E1126 14:17:43.909067 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:43 crc kubenswrapper[5037]: E1126 14:17:43.909198 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:44 crc kubenswrapper[5037]: E1126 14:17:44.206488 5037 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 14:17:45 crc kubenswrapper[5037]: I1126 14:17:45.908161 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:45 crc kubenswrapper[5037]: E1126 14:17:45.908402 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:45 crc kubenswrapper[5037]: I1126 14:17:45.908480 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:45 crc kubenswrapper[5037]: I1126 14:17:45.908561 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:45 crc kubenswrapper[5037]: E1126 14:17:45.908747 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:45 crc kubenswrapper[5037]: E1126 14:17:45.908581 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:45 crc kubenswrapper[5037]: I1126 14:17:45.908924 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:45 crc kubenswrapper[5037]: E1126 14:17:45.908988 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:47 crc kubenswrapper[5037]: I1126 14:17:47.907401 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:47 crc kubenswrapper[5037]: I1126 14:17:47.907616 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:47 crc kubenswrapper[5037]: I1126 14:17:47.907750 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:47 crc kubenswrapper[5037]: E1126 14:17:47.907910 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 14:17:47 crc kubenswrapper[5037]: I1126 14:17:47.908067 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:47 crc kubenswrapper[5037]: E1126 14:17:47.908079 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 14:17:47 crc kubenswrapper[5037]: E1126 14:17:47.908214 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-wjch9" podUID="b18a6f09-7a1e-4965-81e2-dde847147b41" Nov 26 14:17:47 crc kubenswrapper[5037]: E1126 14:17:47.908536 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 14:17:49 crc kubenswrapper[5037]: I1126 14:17:49.907452 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:17:49 crc kubenswrapper[5037]: I1126 14:17:49.907506 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:17:49 crc kubenswrapper[5037]: I1126 14:17:49.907681 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:17:49 crc kubenswrapper[5037]: I1126 14:17:49.907807 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:17:49 crc kubenswrapper[5037]: I1126 14:17:49.910207 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 26 14:17:49 crc kubenswrapper[5037]: I1126 14:17:49.910472 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 26 14:17:49 crc kubenswrapper[5037]: I1126 14:17:49.911194 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 14:17:49 crc kubenswrapper[5037]: I1126 14:17:49.911610 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 26 14:17:49 crc kubenswrapper[5037]: I1126 14:17:49.911789 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 26 14:17:49 crc kubenswrapper[5037]: I1126 14:17:49.912127 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.310522 5037 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.356809 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.357495 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.361455 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sk94z"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.362068 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.363889 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-d5k2g"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.364924 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.365675 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.366176 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-c252f"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.366405 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.366554 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.366638 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.366796 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.367081 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.367307 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.367438 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.367486 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.367727 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.367810 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.367960 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.369316 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-l7mvc"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.369968 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-vwp8j"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.370479 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-vwp8j" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.370708 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.371354 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.371446 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.371856 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.371880 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.372115 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.372149 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.372117 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.372501 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.372595 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.372648 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 26 
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.372801 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.372939 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.373012 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.373780 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.374859 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.377634 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-qfdqh"]
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.378213 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.378232 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-qfdqh"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.378322 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.378433 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.378454 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.378607 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.379643 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.379870 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.380114 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.380327 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.380603 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.380704 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.380339 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-82wc7"]
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.380638 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.381035 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.381458 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx"]
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.382333 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.382507 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.383771 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p"]
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.392821 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.395847 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk"]
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.397363 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.402114 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x"]
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.426115 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.427113 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.429149 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-vq8zt"]
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.429718 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7"]
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.431160 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.431354 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.431378 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.431634 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.431747 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.431771 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.431786 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.431963 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.432053 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.432103 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.432307 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.432446 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.432579 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.432445 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.432890 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.433208 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.433837 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-nwzvj"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.434467 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.435555 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.435734 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.435850 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.435764 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.436266 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.436306 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.436418 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.436425 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.436649 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.436726 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.436730 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.436770 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.436864 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.436871 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.436888 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.437017 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.449406 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-4pktg"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.450159 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sk94z"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.450263 5037 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-4pktg" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.452555 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.453820 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-etcd-serving-ca\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.453871 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4471bb32-29a0-435a-b36b-94ab5766b1fb-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.453898 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4471bb32-29a0-435a-b36b-94ab5766b1fb-serving-cert\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.453925 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d732fc51-cc4a-49a4-b296-2c40ddc33395-audit-dir\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.453961 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-audit\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.453994 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/d732fc51-cc4a-49a4-b296-2c40ddc33395-node-pullsecrets\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454025 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4471bb32-29a0-435a-b36b-94ab5766b1fb-audit-dir\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454133 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d732fc51-cc4a-49a4-b296-2c40ddc33395-serving-cert\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " 
pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454175 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pb69\" (UniqueName: \"kubernetes.io/projected/ce2d6221-7202-44cf-a85e-dec10e764129-kube-api-access-4pb69\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454244 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mxll\" (UniqueName: \"kubernetes.io/projected/4471bb32-29a0-435a-b36b-94ab5766b1fb-kube-api-access-5mxll\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454309 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-client-ca\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454337 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-image-import-ca\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454366 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4471bb32-29a0-435a-b36b-94ab5766b1fb-etcd-client\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454387 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454391 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq85w\" (UniqueName: \"kubernetes.io/projected/d732fc51-cc4a-49a4-b296-2c40ddc33395-kube-api-access-fq85w\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454467 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4471bb32-29a0-435a-b36b-94ab5766b1fb-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454492 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-config\") pod \"controller-manager-879f6c89f-sk94z\" (UID: 
\"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454515 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454540 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d732fc51-cc4a-49a4-b296-2c40ddc33395-etcd-client\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454631 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d732fc51-cc4a-49a4-b296-2c40ddc33395-encryption-config\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454660 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4471bb32-29a0-435a-b36b-94ab5766b1fb-encryption-config\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454676 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-trusted-ca-bundle\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454694 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce2d6221-7202-44cf-a85e-dec10e764129-serving-cert\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454708 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4471bb32-29a0-435a-b36b-94ab5766b1fb-audit-policies\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.454731 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-config\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.456633 5037 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.460529 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.461116 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.461309 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.461468 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.463871 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.464358 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.464624 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.465265 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.465302 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-qfdqh"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.465459 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.465599 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.468575 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.468694 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.468896 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.469079 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.469226 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.469240 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.469369 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.469397 5037 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.469895 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.470694 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.470892 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.471242 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.471349 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.496861 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.497187 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-82wc7"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.497673 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.499193 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-d5k2g"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.503756 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.504130 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.505496 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.521131 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.521490 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.522163 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.522486 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.522831 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.525448 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 
14:17:53.528863 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.528899 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.529109 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-c252f"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.530470 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9z9lt"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.531113 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.532192 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.533105 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.533193 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.534504 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.535069 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.535444 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.535880 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.537364 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-gncr9"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.537882 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-7w8nb"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.538443 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-7w8nb" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.538580 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.540265 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-txjgw"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.540696 5037 util.go:30] "No sandbox for pod can be found. 
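
Every entry in this log shares the klog header layout: a severity letter (I, W, E, F), the date as MMDD, wall-clock time with microseconds, the emitting PID, and source file:line, followed by the structured message. A small self-contained parser for that header is sketched below; the regular expression is ours, not part of klog itself.

package main

import (
	"fmt"
	"regexp"
)

// klogHeader matches headers like
// "E1126 14:17:47.908079 5037 pod_workers.go:1301] ...":
// severity, MMDD, time, PID, source file:line, then the message body.
var klogHeader = regexp.MustCompile(
	`^([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+) ([\w.]+:\d+)\] (.*)$`)

func main() {
	line := `E1126 14:17:47.908079 5037 pod_workers.go:1301] "Error syncing pod, skipping"`
	m := klogHeader.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("not a klog-formatted line")
		return
	}
	fmt.Printf("severity=%s date=%s time=%s pid=%s src=%s msg=%s\n",
		m[1], m[2], m[3], m[4], m[5], m[6])
}

Note that the journald prefix ("Nov 26 14:17:53 crc kubenswrapper[5037]:") would be stripped before applying this; only the klog portion after the colon is matched.
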
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.543920 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.545006 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.546523 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.547110 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555335 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-config\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555380 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmmt7\" (UniqueName: \"kubernetes.io/projected/1e46b121-f4d1-402d-8af2-425b4af276dd-kube-api-access-tmmt7\") pod \"openshift-config-operator-7777fb866f-fpm9x\" (UID: \"1e46b121-f4d1-402d-8af2-425b4af276dd\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555406 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtcl6\" (UniqueName: \"kubernetes.io/projected/0f85943c-8848-42b9-a4e8-43f2689ba52f-kube-api-access-gtcl6\") pod \"machine-approver-56656f9798-bl76p\" (UID: \"0f85943c-8848-42b9-a4e8-43f2689ba52f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555431 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0f85943c-8848-42b9-a4e8-43f2689ba52f-machine-approver-tls\") pod \"machine-approver-56656f9798-bl76p\" (UID: \"0f85943c-8848-42b9-a4e8-43f2689ba52f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555450 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1f3de401-4fc0-48c3-9ecc-0a994b8d5f72-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-h42qk\" (UID: \"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555470 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-etcd-serving-ca\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 
crc kubenswrapper[5037]: I1126 14:17:53.555491 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4471bb32-29a0-435a-b36b-94ab5766b1fb-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555511 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4471bb32-29a0-435a-b36b-94ab5766b1fb-serving-cert\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555528 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d732fc51-cc4a-49a4-b296-2c40ddc33395-audit-dir\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555548 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-audit\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555566 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7-images\") pod \"machine-api-operator-5694c8668f-l7mvc\" (UID: \"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555587 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qn8dp\" (UniqueName: \"kubernetes.io/projected/1f3de401-4fc0-48c3-9ecc-0a994b8d5f72-kube-api-access-qn8dp\") pod \"cluster-image-registry-operator-dc59b4c8b-h42qk\" (UID: \"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555607 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/d8d223ba-d8fb-48bd-9654-4e8146097407-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-l82xx\" (UID: \"d8d223ba-d8fb-48bd-9654-4e8146097407\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555624 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e46b121-f4d1-402d-8af2-425b4af276dd-serving-cert\") pod \"openshift-config-operator-7777fb866f-fpm9x\" (UID: \"1e46b121-f4d1-402d-8af2-425b4af276dd\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555645 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: 
\"kubernetes.io/host-path/d732fc51-cc4a-49a4-b296-2c40ddc33395-node-pullsecrets\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555663 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4471bb32-29a0-435a-b36b-94ab5766b1fb-audit-dir\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555680 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d732fc51-cc4a-49a4-b296-2c40ddc33395-serving-cert\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555713 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pb69\" (UniqueName: \"kubernetes.io/projected/ce2d6221-7202-44cf-a85e-dec10e764129-kube-api-access-4pb69\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555728 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mxll\" (UniqueName: \"kubernetes.io/projected/4471bb32-29a0-435a-b36b-94ab5766b1fb-kube-api-access-5mxll\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555744 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-client-ca\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555764 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-image-import-ca\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555781 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtn7l\" (UniqueName: \"kubernetes.io/projected/d8d223ba-d8fb-48bd-9654-4e8146097407-kube-api-access-wtn7l\") pod \"cluster-samples-operator-665b6dd947-l82xx\" (UID: \"d8d223ba-d8fb-48bd-9654-4e8146097407\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555805 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-l7mvc\" (UID: \"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555822 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1f3de401-4fc0-48c3-9ecc-0a994b8d5f72-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-h42qk\" (UID: \"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555838 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0f85943c-8848-42b9-a4e8-43f2689ba52f-auth-proxy-config\") pod \"machine-approver-56656f9798-bl76p\" (UID: \"0f85943c-8848-42b9-a4e8-43f2689ba52f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555856 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/1e46b121-f4d1-402d-8af2-425b4af276dd-available-featuregates\") pod \"openshift-config-operator-7777fb866f-fpm9x\" (UID: \"1e46b121-f4d1-402d-8af2-425b4af276dd\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555872 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f85943c-8848-42b9-a4e8-43f2689ba52f-config\") pod \"machine-approver-56656f9798-bl76p\" (UID: \"0f85943c-8848-42b9-a4e8-43f2689ba52f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555889 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4471bb32-29a0-435a-b36b-94ab5766b1fb-etcd-client\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555908 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq85w\" (UniqueName: \"kubernetes.io/projected/d732fc51-cc4a-49a4-b296-2c40ddc33395-kube-api-access-fq85w\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555924 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7-config\") pod \"machine-api-operator-5694c8668f-l7mvc\" (UID: \"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555949 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4471bb32-29a0-435a-b36b-94ab5766b1fb-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555966 
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555966 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tzdz\" (UniqueName: \"kubernetes.io/projected/c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7-kube-api-access-7tzdz\") pod \"machine-api-operator-5694c8668f-l7mvc\" (UID: \"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.555986 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-config\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.556004 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.556023 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1f3de401-4fc0-48c3-9ecc-0a994b8d5f72-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-h42qk\" (UID: \"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.556041 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d732fc51-cc4a-49a4-b296-2c40ddc33395-etcd-client\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.556058 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d732fc51-cc4a-49a4-b296-2c40ddc33395-encryption-config\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.556073 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4471bb32-29a0-435a-b36b-94ab5766b1fb-encryption-config\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.556091 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-trusted-ca-bundle\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.556118 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce2d6221-7202-44cf-a85e-dec10e764129-serving-cert\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.556136 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4471bb32-29a0-435a-b36b-94ab5766b1fb-audit-policies\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.556267 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-etcd-serving-ca\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.556335 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-config\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.556431 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d732fc51-cc4a-49a4-b296-2c40ddc33395-audit-dir\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.557035 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4471bb32-29a0-435a-b36b-94ab5766b1fb-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.557128 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4471bb32-29a0-435a-b36b-94ab5766b1fb-audit-policies\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.557210 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-audit\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.557796 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4471bb32-29a0-435a-b36b-94ab5766b1fb-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.557993 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-image-import-ca\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.558166 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-client-ca\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.558987 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.560004 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/d732fc51-cc4a-49a4-b296-2c40ddc33395-node-pullsecrets\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.560235 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-config\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.560243 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4471bb32-29a0-435a-b36b-94ab5766b1fb-audit-dir\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.561843 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn"]
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.562177 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d732fc51-cc4a-49a4-b296-2c40ddc33395-trusted-ca-bundle\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g"
Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.562860 5037 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.563504 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d732fc51-cc4a-49a4-b296-2c40ddc33395-encryption-config\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.563527 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d732fc51-cc4a-49a4-b296-2c40ddc33395-etcd-client\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.563977 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.565661 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.567394 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.568063 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.568573 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4471bb32-29a0-435a-b36b-94ab5766b1fb-encryption-config\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.568621 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-4pktg"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.569890 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4471bb32-29a0-435a-b36b-94ab5766b1fb-etcd-client\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.570088 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce2d6221-7202-44cf-a85e-dec10e764129-serving-cert\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.572119 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-nwzvj"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.574202 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.575076 
5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-7wr5z"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.576935 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-7wr5z" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.577196 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.579611 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-9hccb"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.581141 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-9hccb" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.581463 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.581912 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.582816 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d732fc51-cc4a-49a4-b296-2c40ddc33395-serving-cert\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.583683 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.583869 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.587122 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.589113 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.592410 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4471bb32-29a0-435a-b36b-94ab5766b1fb-serving-cert\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.596888 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.597879 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.598418 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.598988 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.599412 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.605695 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.612260 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.616321 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-cwfd8"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.618794 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.620091 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.620162 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.620645 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.622376 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.623855 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9z9lt"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.625206 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.626442 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.627695 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-vq8zt"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.629308 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-l7mvc"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.630424 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-vwp8j"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.631601 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.632620 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-7w8nb"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.633804 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.635192 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-zz4kv"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.636750 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.637107 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.637742 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-9hccb"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.639076 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-85ns4"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.640092 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.640182 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-85ns4" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.641245 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.641394 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.642981 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.644176 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.645518 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.647092 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.648344 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-txjgw"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.650130 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-85ns4"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.651567 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.652965 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.654616 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-zz4kv"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.655679 5037 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.656823 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-7wr5z"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657064 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmmt7\" (UniqueName: \"kubernetes.io/projected/1e46b121-f4d1-402d-8af2-425b4af276dd-kube-api-access-tmmt7\") pod \"openshift-config-operator-7777fb866f-fpm9x\" (UID: \"1e46b121-f4d1-402d-8af2-425b4af276dd\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657098 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0f85943c-8848-42b9-a4e8-43f2689ba52f-machine-approver-tls\") pod \"machine-approver-56656f9798-bl76p\" (UID: \"0f85943c-8848-42b9-a4e8-43f2689ba52f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657121 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtcl6\" (UniqueName: \"kubernetes.io/projected/0f85943c-8848-42b9-a4e8-43f2689ba52f-kube-api-access-gtcl6\") pod \"machine-approver-56656f9798-bl76p\" (UID: \"0f85943c-8848-42b9-a4e8-43f2689ba52f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657145 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1f3de401-4fc0-48c3-9ecc-0a994b8d5f72-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-h42qk\" (UID: \"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657170 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7-images\") pod \"machine-api-operator-5694c8668f-l7mvc\" (UID: \"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657192 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qn8dp\" (UniqueName: \"kubernetes.io/projected/1f3de401-4fc0-48c3-9ecc-0a994b8d5f72-kube-api-access-qn8dp\") pod \"cluster-image-registry-operator-dc59b4c8b-h42qk\" (UID: \"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657215 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e46b121-f4d1-402d-8af2-425b4af276dd-serving-cert\") pod \"openshift-config-operator-7777fb866f-fpm9x\" (UID: \"1e46b121-f4d1-402d-8af2-425b4af276dd\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657247 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/d8d223ba-d8fb-48bd-9654-4e8146097407-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-l82xx\" (UID: \"d8d223ba-d8fb-48bd-9654-4e8146097407\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657363 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtn7l\" (UniqueName: \"kubernetes.io/projected/d8d223ba-d8fb-48bd-9654-4e8146097407-kube-api-access-wtn7l\") pod \"cluster-samples-operator-665b6dd947-l82xx\" (UID: \"d8d223ba-d8fb-48bd-9654-4e8146097407\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657396 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-l7mvc\" (UID: \"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657414 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1f3de401-4fc0-48c3-9ecc-0a994b8d5f72-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-h42qk\" (UID: \"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657432 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0f85943c-8848-42b9-a4e8-43f2689ba52f-auth-proxy-config\") pod \"machine-approver-56656f9798-bl76p\" (UID: \"0f85943c-8848-42b9-a4e8-43f2689ba52f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657449 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/1e46b121-f4d1-402d-8af2-425b4af276dd-available-featuregates\") pod \"openshift-config-operator-7777fb866f-fpm9x\" (UID: \"1e46b121-f4d1-402d-8af2-425b4af276dd\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657465 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f85943c-8848-42b9-a4e8-43f2689ba52f-config\") pod \"machine-approver-56656f9798-bl76p\" (UID: \"0f85943c-8848-42b9-a4e8-43f2689ba52f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657481 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7-config\") pod \"machine-api-operator-5694c8668f-l7mvc\" (UID: \"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657511 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tzdz\" (UniqueName: \"kubernetes.io/projected/c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7-kube-api-access-7tzdz\") 
pod \"machine-api-operator-5694c8668f-l7mvc\" (UID: \"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.657529 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1f3de401-4fc0-48c3-9ecc-0a994b8d5f72-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-h42qk\" (UID: \"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.659348 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f85943c-8848-42b9-a4e8-43f2689ba52f-config\") pod \"machine-approver-56656f9798-bl76p\" (UID: \"0f85943c-8848-42b9-a4e8-43f2689ba52f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.659854 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1f3de401-4fc0-48c3-9ecc-0a994b8d5f72-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-h42qk\" (UID: \"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.659873 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.659923 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.659935 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-cwfd8"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.660125 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7-images\") pod \"machine-api-operator-5694c8668f-l7mvc\" (UID: \"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.660364 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0f85943c-8848-42b9-a4e8-43f2689ba52f-auth-proxy-config\") pod \"machine-approver-56656f9798-bl76p\" (UID: \"0f85943c-8848-42b9-a4e8-43f2689ba52f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.660600 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7-config\") pod \"machine-api-operator-5694c8668f-l7mvc\" (UID: \"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.660887 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0f85943c-8848-42b9-a4e8-43f2689ba52f-machine-approver-tls\") pod \"machine-approver-56656f9798-bl76p\" (UID: 
\"0f85943c-8848-42b9-a4e8-43f2689ba52f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.661228 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-m2pbv"] Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.661382 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/1e46b121-f4d1-402d-8af2-425b4af276dd-available-featuregates\") pod \"openshift-config-operator-7777fb866f-fpm9x\" (UID: \"1e46b121-f4d1-402d-8af2-425b4af276dd\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.661835 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-l7mvc\" (UID: \"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.661906 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-m2pbv" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.664585 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e46b121-f4d1-402d-8af2-425b4af276dd-serving-cert\") pod \"openshift-config-operator-7777fb866f-fpm9x\" (UID: \"1e46b121-f4d1-402d-8af2-425b4af276dd\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.665374 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/d8d223ba-d8fb-48bd-9654-4e8146097407-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-l82xx\" (UID: \"d8d223ba-d8fb-48bd-9654-4e8146097407\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.673530 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1f3de401-4fc0-48c3-9ecc-0a994b8d5f72-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-h42qk\" (UID: \"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.674659 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.681139 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.700991 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.721039 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.741516 5037 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.761342 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.781994 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.800698 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.821876 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.842054 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.860759 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.881037 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.901572 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.920712 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.941913 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.960938 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 26 14:17:53 crc kubenswrapper[5037]: I1126 14:17:53.980935 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.000839 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.021404 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.042552 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.061969 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.081336 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.101587 5037 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-ingress"/"kube-root-ca.crt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.121198 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.142011 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.162108 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.181250 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.201760 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.229557 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.241523 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.282119 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.301730 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.321472 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.341551 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.382143 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq85w\" (UniqueName: \"kubernetes.io/projected/d732fc51-cc4a-49a4-b296-2c40ddc33395-kube-api-access-fq85w\") pod \"apiserver-76f77b778f-d5k2g\" (UID: \"d732fc51-cc4a-49a4-b296-2c40ddc33395\") " pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.402365 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pb69\" (UniqueName: \"kubernetes.io/projected/ce2d6221-7202-44cf-a85e-dec10e764129-kube-api-access-4pb69\") pod \"controller-manager-879f6c89f-sk94z\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.418077 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mxll\" (UniqueName: \"kubernetes.io/projected/4471bb32-29a0-435a-b36b-94ab5766b1fb-kube-api-access-5mxll\") pod \"apiserver-7bbb656c7d-lnds7\" (UID: \"4471bb32-29a0-435a-b36b-94ab5766b1fb\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.421954 5037 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.442068 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.462086 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.482190 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.501723 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.521574 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.540941 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.561219 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.579697 5037 request.go:700] Waited for 1.013619993s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator-operator/configmaps?fieldSelector=metadata.name%3Dconfig&limit=500&resourceVersion=0 Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.580954 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.583386 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.597260 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.602323 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.633576 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.642420 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.664018 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.676721 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/25030986-5796-4784-accd-c465c7c2daa3-console-serving-cert\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.676834 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7x8w\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-kube-api-access-w7x8w\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.676954 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.677098 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0d344f6-3e27-4724-a70d-3b91cfb19576-config\") pod \"openshift-apiserver-operator-796bbdcf4f-5fx6g\" (UID: \"b0d344f6-3e27-4724-a70d-3b91cfb19576\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.677231 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.677445 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4-config\") pod \"console-operator-58897d9998-vq8zt\" (UID: \"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4\") " pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.677593 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-registry-certificates\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.677643 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.677857 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-oauth-serving-cert\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.678114 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.678270 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.678460 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/25030986-5796-4784-accd-c465c7c2daa3-console-oauth-config\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.678590 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: E1126 14:17:54.681063 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:55.181025874 +0000 UTC m=+141.977796218 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.681383 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/af3320db-8161-492c-89ef-79aff52e898c-service-ca-bundle\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.681499 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-console-config\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.681556 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0d344f6-3e27-4724-a70d-3b91cfb19576-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-5fx6g\" (UID: \"b0d344f6-3e27-4724-a70d-3b91cfb19576\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.681666 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-service-ca\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.681767 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cl2f\" (UniqueName: \"kubernetes.io/projected/9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7-kube-api-access-7cl2f\") pod \"dns-operator-744455d44c-4pktg\" (UID: \"9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7\") " pod="openshift-dns-operator/dns-operator-744455d44c-4pktg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.681826 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-installation-pull-secrets\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.681865 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-bound-sa-token\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.681914 
5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4-trusted-ca\") pod \"console-operator-58897d9998-vq8zt\" (UID: \"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4\") " pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.682006 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/290387b2-4285-4359-bfdc-f89128f0c0a2-client-ca\") pod \"route-controller-manager-6576b87f9c-glk27\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.682547 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsxzw\" (UniqueName: \"kubernetes.io/projected/13b910b7-69a1-438a-9ebe-d865adc99607-kube-api-access-rsxzw\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.682620 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-trusted-ca\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.682767 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13b910b7-69a1-438a-9ebe-d865adc99607-audit-dir\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.682841 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7-metrics-tls\") pod \"dns-operator-744455d44c-4pktg\" (UID: \"9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7\") " pod="openshift-dns-operator/dns-operator-744455d44c-4pktg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.682868 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/290387b2-4285-4359-bfdc-f89128f0c0a2-serving-cert\") pod \"route-controller-manager-6576b87f9c-glk27\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.682920 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwrf6\" (UniqueName: \"kubernetes.io/projected/290387b2-4285-4359-bfdc-f89128f0c0a2-kube-api-access-dwrf6\") pod \"route-controller-manager-6576b87f9c-glk27\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.682949 5037 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lds9f\" (UniqueName: \"kubernetes.io/projected/b0d344f6-3e27-4724-a70d-3b91cfb19576-kube-api-access-lds9f\") pod \"openshift-apiserver-operator-796bbdcf4f-5fx6g\" (UID: \"b0d344f6-3e27-4724-a70d-3b91cfb19576\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683003 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683070 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/af3320db-8161-492c-89ef-79aff52e898c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683110 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lh8bd\" (UniqueName: \"kubernetes.io/projected/e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4-kube-api-access-lh8bd\") pod \"console-operator-58897d9998-vq8zt\" (UID: \"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4\") " pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683148 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-trusted-ca-bundle\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683215 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af3320db-8161-492c-89ef-79aff52e898c-config\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683267 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqrld\" (UniqueName: \"kubernetes.io/projected/25030986-5796-4784-accd-c465c7c2daa3-kube-api-access-gqrld\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683385 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-ca-trust-extracted\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683438 5037 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683507 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swkxq\" (UniqueName: \"kubernetes.io/projected/9dfb8a84-f022-4823-b563-5800b665b32f-kube-api-access-swkxq\") pod \"downloads-7954f5f757-vwp8j\" (UID: \"9dfb8a84-f022-4823-b563-5800b665b32f\") " pod="openshift-console/downloads-7954f5f757-vwp8j" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683558 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683652 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9f9v\" (UniqueName: \"kubernetes.io/projected/af3320db-8161-492c-89ef-79aff52e898c-kube-api-access-x9f9v\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683706 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683794 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683847 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4-serving-cert\") pod \"console-operator-58897d9998-vq8zt\" (UID: \"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4\") " pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.683952 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-audit-policies\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 
crc kubenswrapper[5037]: I1126 14:17:54.684118 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-registry-tls\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.684133 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.684168 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/290387b2-4285-4359-bfdc-f89128f0c0a2-config\") pod \"route-controller-manager-6576b87f9c-glk27\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.684236 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af3320db-8161-492c-89ef-79aff52e898c-serving-cert\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.684255 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.702247 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.722028 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.743740 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.762149 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.781964 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.785654 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.785861 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzjlk\" (UniqueName: 
\"kubernetes.io/projected/adb95af6-2754-4b77-94a8-c8df9d429a2c-kube-api-access-dzjlk\") pod \"service-ca-operator-777779d784-8h7nh\" (UID: \"adb95af6-2754-4b77-94a8-c8df9d429a2c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.785914 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c8ba13cb-4099-4fa8-b0fd-dba5852bd704-profile-collector-cert\") pod \"olm-operator-6b444d44fb-jb467\" (UID: \"c8ba13cb-4099-4fa8-b0fd-dba5852bd704\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.785980 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/73f71d36-826a-4890-8f3f-6f1f3f159d5e-metrics-certs\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786035 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff-srv-cert\") pod \"catalog-operator-68c6474976-p7j5p\" (UID: \"eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786081 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b18bda87-1ef5-4511-ae6b-d9326a76aca2-images\") pod \"machine-config-operator-74547568cd-nnqb4\" (UID: \"b18bda87-1ef5-4511-ae6b-d9326a76aca2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786134 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0d344f6-3e27-4724-a70d-3b91cfb19576-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-5fx6g\" (UID: \"b0d344f6-3e27-4724-a70d-3b91cfb19576\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786181 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cl2f\" (UniqueName: \"kubernetes.io/projected/9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7-kube-api-access-7cl2f\") pod \"dns-operator-744455d44c-4pktg\" (UID: \"9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7\") " pod="openshift-dns-operator/dns-operator-744455d44c-4pktg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786223 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/116c03c2-be5b-427a-8143-f40794e102a5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ktvdg\" (UID: \"116c03c2-be5b-427a-8143-f40794e102a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786337 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4-trusted-ca\") pod \"console-operator-58897d9998-vq8zt\" (UID: \"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4\") " pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786393 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/19bb1706-6f02-4fe1-ac74-3425bc25376c-apiservice-cert\") pod \"packageserver-d55dfcdfc-jnbdg\" (UID: \"19bb1706-6f02-4fe1-ac74-3425bc25376c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786442 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-mountpoint-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786503 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b18bda87-1ef5-4511-ae6b-d9326a76aca2-proxy-tls\") pod \"machine-config-operator-74547568cd-nnqb4\" (UID: \"b18bda87-1ef5-4511-ae6b-d9326a76aca2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786551 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsxzw\" (UniqueName: \"kubernetes.io/projected/13b910b7-69a1-438a-9ebe-d865adc99607-kube-api-access-rsxzw\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786591 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/81241e6e-dcc2-4509-8e40-fd330f57a15b-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-jm5zr\" (UID: \"81241e6e-dcc2-4509-8e40-fd330f57a15b\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786628 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ec1b52c7-15a6-4489-ba94-18d3621f4931-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-n4nhc\" (UID: \"ec1b52c7-15a6-4489-ba94-18d3621f4931\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786660 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/116c03c2-be5b-427a-8143-f40794e102a5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ktvdg\" (UID: \"116c03c2-be5b-427a-8143-f40794e102a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786692 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/edcc6478-c344-433f-9fef-c27760c464fc-signing-cabundle\") pod \"service-ca-9c57cc56f-cwfd8\" (UID: \"edcc6478-c344-433f-9fef-c27760c464fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786726 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5c0da48-0f42-4508-be7c-da6125c90874-config\") pod \"kube-apiserver-operator-766d6c64bb-lt8gn\" (UID: \"e5c0da48-0f42-4508-be7c-da6125c90874\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786774 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13b910b7-69a1-438a-9ebe-d865adc99607-audit-dir\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786811 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec1b52c7-15a6-4489-ba94-18d3621f4931-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-n4nhc\" (UID: \"ec1b52c7-15a6-4489-ba94-18d3621f4931\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786844 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgbmw\" (UniqueName: \"kubernetes.io/projected/73f71d36-826a-4890-8f3f-6f1f3f159d5e-kube-api-access-kgbmw\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786879 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adb95af6-2754-4b77-94a8-c8df9d429a2c-serving-cert\") pod \"service-ca-operator-777779d784-8h7nh\" (UID: \"adb95af6-2754-4b77-94a8-c8df9d429a2c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786933 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.786975 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b58m5\" (UniqueName: \"kubernetes.io/projected/edcc6478-c344-433f-9fef-c27760c464fc-kube-api-access-b58m5\") pod \"service-ca-9c57cc56f-cwfd8\" (UID: \"edcc6478-c344-433f-9fef-c27760c464fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787014 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwrf6\" (UniqueName: 
\"kubernetes.io/projected/290387b2-4285-4359-bfdc-f89128f0c0a2-kube-api-access-dwrf6\") pod \"route-controller-manager-6576b87f9c-glk27\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787058 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/af3320db-8161-492c-89ef-79aff52e898c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787093 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/13abb7ac-3229-4f80-9132-d51f89ec896b-node-bootstrap-token\") pod \"machine-config-server-m2pbv\" (UID: \"13abb7ac-3229-4f80-9132-d51f89ec896b\") " pod="openshift-machine-config-operator/machine-config-server-m2pbv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787136 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-serving-cert\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787191 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lh8bd\" (UniqueName: \"kubernetes.io/projected/e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4-kube-api-access-lh8bd\") pod \"console-operator-58897d9998-vq8zt\" (UID: \"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4\") " pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787234 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a16c7c46-2c28-444d-8b7d-0ef797877620-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-g2jw7\" (UID: \"a16c7c46-2c28-444d-8b7d-0ef797877620\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787277 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvkzs\" (UniqueName: \"kubernetes.io/projected/8b2886cf-264e-4ecd-b5f3-05c8974c7990-kube-api-access-dvkzs\") pod \"machine-config-controller-84d6567774-5kk6k\" (UID: \"8b2886cf-264e-4ecd-b5f3-05c8974c7990\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787350 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wzv4\" (UniqueName: \"kubernetes.io/projected/19bb1706-6f02-4fe1-ac74-3425bc25376c-kube-api-access-8wzv4\") pod \"packageserver-d55dfcdfc-jnbdg\" (UID: \"19bb1706-6f02-4fe1-ac74-3425bc25376c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787388 5037 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-plugins-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787435 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c8ba13cb-4099-4fa8-b0fd-dba5852bd704-srv-cert\") pod \"olm-operator-6b444d44fb-jb467\" (UID: \"c8ba13cb-4099-4fa8-b0fd-dba5852bd704\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787478 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/581b0050-27fb-4d72-9f11-75f4eb55a783-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-6kv25\" (UID: \"581b0050-27fb-4d72-9f11-75f4eb55a783\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787511 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvgk4\" (UniqueName: \"kubernetes.io/projected/ec1b52c7-15a6-4489-ba94-18d3621f4931-kube-api-access-qvgk4\") pod \"kube-storage-version-migrator-operator-b67b599dd-n4nhc\" (UID: \"ec1b52c7-15a6-4489-ba94-18d3621f4931\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787545 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a2386288-e064-42f1-aac0-5866f0179542-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-7w8nb\" (UID: \"a2386288-e064-42f1-aac0-5866f0179542\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-7w8nb" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787580 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wt8fp\" (UniqueName: \"kubernetes.io/projected/9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d-kube-api-access-wt8fp\") pod \"dns-default-9hccb\" (UID: \"9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d\") " pod="openshift-dns/dns-default-9hccb" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787629 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787667 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8b2886cf-264e-4ecd-b5f3-05c8974c7990-proxy-tls\") pod \"machine-config-controller-84d6567774-5kk6k\" (UID: \"8b2886cf-264e-4ecd-b5f3-05c8974c7990\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787720 5037 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp6ns\" (UniqueName: \"kubernetes.io/projected/81241e6e-dcc2-4509-8e40-fd330f57a15b-kube-api-access-kp6ns\") pod \"package-server-manager-789f6589d5-jm5zr\" (UID: \"81241e6e-dcc2-4509-8e40-fd330f57a15b\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787756 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8b2f4cca-09b7-44dc-9458-298b0e3c8507-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-txjgw\" (UID: \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\") " pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787794 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787828 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/19bb1706-6f02-4fe1-ac74-3425bc25376c-webhook-cert\") pod \"packageserver-d55dfcdfc-jnbdg\" (UID: \"19bb1706-6f02-4fe1-ac74-3425bc25376c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787865 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rt8wv\" (UniqueName: \"kubernetes.io/projected/a16c7c46-2c28-444d-8b7d-0ef797877620-kube-api-access-rt8wv\") pod \"control-plane-machine-set-operator-78cbb6b69f-g2jw7\" (UID: \"a16c7c46-2c28-444d-8b7d-0ef797877620\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787919 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d-metrics-tls\") pod \"dns-default-9hccb\" (UID: \"9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d\") " pod="openshift-dns/dns-default-9hccb" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787954 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/29912f3d-383e-4361-b882-48ab47cecb56-trusted-ca\") pod \"ingress-operator-5b745b69d9-kj5z5\" (UID: \"29912f3d-383e-4361-b882-48ab47cecb56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.787991 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/116c03c2-be5b-427a-8143-f40794e102a5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ktvdg\" (UID: \"116c03c2-be5b-427a-8143-f40794e102a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 
14:17:54.788029 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7x8w\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-kube-api-access-w7x8w\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.788064 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.788102 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgp9n\" (UniqueName: \"kubernetes.io/projected/b18bda87-1ef5-4511-ae6b-d9326a76aca2-kube-api-access-zgp9n\") pod \"machine-config-operator-74547568cd-nnqb4\" (UID: \"b18bda87-1ef5-4511-ae6b-d9326a76aca2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.788925 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e5c0da48-0f42-4508-be7c-da6125c90874-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-lt8gn\" (UID: \"e5c0da48-0f42-4508-be7c-da6125c90874\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.788975 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/da42804b-0fa3-43ee-9566-296c28b8052f-config-volume\") pod \"collect-profiles-29402775-ts75f\" (UID: \"da42804b-0fa3-43ee-9566-296c28b8052f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789041 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-registry-certificates\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789082 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789121 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-oauth-serving-cert\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789159 5037 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/73f71d36-826a-4890-8f3f-6f1f3f159d5e-service-ca-bundle\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789195 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-socket-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789230 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8042008-6a66-42ca-8f5e-76dd748cf0ba-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8wsp6\" (UID: \"d8042008-6a66-42ca-8f5e-76dd748cf0ba\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789278 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789352 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789392 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/73f71d36-826a-4890-8f3f-6f1f3f159d5e-stats-auth\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789433 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbzl7\" (UniqueName: \"kubernetes.io/projected/b0ea6163-7bb2-458d-bd2b-5ec1b5d4960c-kube-api-access-gbzl7\") pod \"migrator-59844c95c7-7wr5z\" (UID: \"b0ea6163-7bb2-458d-bd2b-5ec1b5d4960c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-7wr5z" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789474 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/25030986-5796-4784-accd-c465c7c2daa3-console-oauth-config\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789508 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/d8042008-6a66-42ca-8f5e-76dd748cf0ba-config\") pod \"kube-controller-manager-operator-78b949d7b-8wsp6\" (UID: \"d8042008-6a66-42ca-8f5e-76dd748cf0ba\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789566 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kncvx\" (UniqueName: \"kubernetes.io/projected/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-kube-api-access-kncvx\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789606 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/af3320db-8161-492c-89ef-79aff52e898c-service-ca-bundle\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789642 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-console-config\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789679 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjk2b\" (UniqueName: \"kubernetes.io/projected/13abb7ac-3229-4f80-9132-d51f89ec896b-kube-api-access-rjk2b\") pod \"machine-config-server-m2pbv\" (UID: \"13abb7ac-3229-4f80-9132-d51f89ec896b\") " pod="openshift-machine-config-operator/machine-config-server-m2pbv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789713 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-csi-data-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789748 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp4xp\" (UniqueName: \"kubernetes.io/projected/77dc662c-6698-4035-b332-13bc5a0f3136-kube-api-access-fp4xp\") pod \"ingress-canary-85ns4\" (UID: \"77dc662c-6698-4035-b332-13bc5a0f3136\") " pod="openshift-ingress-canary/ingress-canary-85ns4" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789782 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pp69n\" (UniqueName: \"kubernetes.io/projected/29912f3d-383e-4361-b882-48ab47cecb56-kube-api-access-pp69n\") pod \"ingress-operator-5b745b69d9-kj5z5\" (UID: \"29912f3d-383e-4361-b882-48ab47cecb56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789818 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-service-ca\") pod \"console-f9d7485db-qfdqh\" (UID: 
\"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789903 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-bound-sa-token\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789944 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-etcd-service-ca\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.789980 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-installation-pull-secrets\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790024 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/19bb1706-6f02-4fe1-ac74-3425bc25376c-tmpfs\") pod \"packageserver-d55dfcdfc-jnbdg\" (UID: \"19bb1706-6f02-4fe1-ac74-3425bc25376c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790123 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/13abb7ac-3229-4f80-9132-d51f89ec896b-certs\") pod \"machine-config-server-m2pbv\" (UID: \"13abb7ac-3229-4f80-9132-d51f89ec896b\") " pod="openshift-machine-config-operator/machine-config-server-m2pbv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790182 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-etcd-client\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790237 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/290387b2-4285-4359-bfdc-f89128f0c0a2-client-ca\") pod \"route-controller-manager-6576b87f9c-glk27\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790326 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-trusted-ca\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790401 5037 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d-config-volume\") pod \"dns-default-9hccb\" (UID: \"9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d\") " pod="openshift-dns/dns-default-9hccb" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790437 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/73f71d36-826a-4890-8f3f-6f1f3f159d5e-default-certificate\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790480 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7-metrics-tls\") pod \"dns-operator-744455d44c-4pktg\" (UID: \"9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7\") " pod="openshift-dns-operator/dns-operator-744455d44c-4pktg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790517 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8b2886cf-264e-4ecd-b5f3-05c8974c7990-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-5kk6k\" (UID: \"8b2886cf-264e-4ecd-b5f3-05c8974c7990\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790553 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsgcw\" (UniqueName: \"kubernetes.io/projected/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-kube-api-access-xsgcw\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790590 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/290387b2-4285-4359-bfdc-f89128f0c0a2-serving-cert\") pod \"route-controller-manager-6576b87f9c-glk27\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790646 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lds9f\" (UniqueName: \"kubernetes.io/projected/b0d344f6-3e27-4724-a70d-3b91cfb19576-kube-api-access-lds9f\") pod \"openshift-apiserver-operator-796bbdcf4f-5fx6g\" (UID: \"b0d344f6-3e27-4724-a70d-3b91cfb19576\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790704 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adb95af6-2754-4b77-94a8-c8df9d429a2c-config\") pod \"service-ca-operator-777779d784-8h7nh\" (UID: \"adb95af6-2754-4b77-94a8-c8df9d429a2c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790741 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brt8r\" (UniqueName: 
\"kubernetes.io/projected/a2386288-e064-42f1-aac0-5866f0179542-kube-api-access-brt8r\") pod \"multus-admission-controller-857f4d67dd-7w8nb\" (UID: \"a2386288-e064-42f1-aac0-5866f0179542\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-7w8nb" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790780 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-trusted-ca-bundle\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790885 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af3320db-8161-492c-89ef-79aff52e898c-config\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790921 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqrld\" (UniqueName: \"kubernetes.io/projected/25030986-5796-4784-accd-c465c7c2daa3-kube-api-access-gqrld\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790957 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-etcd-ca\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.790994 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791031 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nccnc\" (UniqueName: \"kubernetes.io/projected/581b0050-27fb-4d72-9f11-75f4eb55a783-kube-api-access-nccnc\") pod \"openshift-controller-manager-operator-756b6f6bc6-6kv25\" (UID: \"581b0050-27fb-4d72-9f11-75f4eb55a783\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791068 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/29912f3d-383e-4361-b882-48ab47cecb56-metrics-tls\") pod \"ingress-operator-5b745b69d9-kj5z5\" (UID: \"29912f3d-383e-4361-b882-48ab47cecb56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791104 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-ca-trust-extracted\") pod 
\"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791143 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e5c0da48-0f42-4508-be7c-da6125c90874-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-lt8gn\" (UID: \"e5c0da48-0f42-4508-be7c-da6125c90874\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791240 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/77dc662c-6698-4035-b332-13bc5a0f3136-cert\") pod \"ingress-canary-85ns4\" (UID: \"77dc662c-6698-4035-b332-13bc5a0f3136\") " pod="openshift-ingress-canary/ingress-canary-85ns4" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791310 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swkxq\" (UniqueName: \"kubernetes.io/projected/9dfb8a84-f022-4823-b563-5800b665b32f-kube-api-access-swkxq\") pod \"downloads-7954f5f757-vwp8j\" (UID: \"9dfb8a84-f022-4823-b563-5800b665b32f\") " pod="openshift-console/downloads-7954f5f757-vwp8j" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791348 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791386 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47ck9\" (UniqueName: \"kubernetes.io/projected/8b2f4cca-09b7-44dc-9458-298b0e3c8507-kube-api-access-47ck9\") pod \"marketplace-operator-79b997595-txjgw\" (UID: \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\") " pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791427 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9f9v\" (UniqueName: \"kubernetes.io/projected/af3320db-8161-492c-89ef-79aff52e898c-kube-api-access-x9f9v\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791464 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791500 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4-serving-cert\") pod \"console-operator-58897d9998-vq8zt\" (UID: \"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4\") " 
pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791539 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-audit-policies\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791578 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b18bda87-1ef5-4511-ae6b-d9326a76aca2-auth-proxy-config\") pod \"machine-config-operator-74547568cd-nnqb4\" (UID: \"b18bda87-1ef5-4511-ae6b-d9326a76aca2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791617 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/290387b2-4285-4359-bfdc-f89128f0c0a2-config\") pod \"route-controller-manager-6576b87f9c-glk27\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.791655 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af3320db-8161-492c-89ef-79aff52e898c-serving-cert\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.792271 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pj7zm\" (UniqueName: \"kubernetes.io/projected/da42804b-0fa3-43ee-9566-296c28b8052f-kube-api-access-pj7zm\") pod \"collect-profiles-29402775-ts75f\" (UID: \"da42804b-0fa3-43ee-9566-296c28b8052f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.792465 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8042008-6a66-42ca-8f5e-76dd748cf0ba-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8wsp6\" (UID: \"d8042008-6a66-42ca-8f5e-76dd748cf0ba\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.792545 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-registry-tls\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.792597 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9npv\" (UniqueName: \"kubernetes.io/projected/eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff-kube-api-access-n9npv\") pod \"catalog-operator-68c6474976-p7j5p\" (UID: \"eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.792650 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/25030986-5796-4784-accd-c465c7c2daa3-console-serving-cert\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.792933 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0d344f6-3e27-4724-a70d-3b91cfb19576-config\") pod \"openshift-apiserver-operator-796bbdcf4f-5fx6g\" (UID: \"b0d344f6-3e27-4724-a70d-3b91cfb19576\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.792984 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.793025 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff-profile-collector-cert\") pod \"catalog-operator-68c6474976-p7j5p\" (UID: \"eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.793071 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-config\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.793119 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/edcc6478-c344-433f-9fef-c27760c464fc-signing-key\") pod \"service-ca-9c57cc56f-cwfd8\" (UID: \"edcc6478-c344-433f-9fef-c27760c464fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.793487 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/581b0050-27fb-4d72-9f11-75f4eb55a783-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-6kv25\" (UID: \"581b0050-27fb-4d72-9f11-75f4eb55a783\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.793531 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4-config\") pod \"console-operator-58897d9998-vq8zt\" (UID: \"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4\") " pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.793602 5037 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2jws\" (UniqueName: \"kubernetes.io/projected/c8ba13cb-4099-4fa8-b0fd-dba5852bd704-kube-api-access-t2jws\") pod \"olm-operator-6b444d44fb-jb467\" (UID: \"c8ba13cb-4099-4fa8-b0fd-dba5852bd704\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.793649 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8b2f4cca-09b7-44dc-9458-298b0e3c8507-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-txjgw\" (UID: \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\") " pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.793914 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/29912f3d-383e-4361-b882-48ab47cecb56-bound-sa-token\") pod \"ingress-operator-5b745b69d9-kj5z5\" (UID: \"29912f3d-383e-4361-b882-48ab47cecb56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.793977 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/da42804b-0fa3-43ee-9566-296c28b8052f-secret-volume\") pod \"collect-profiles-29402775-ts75f\" (UID: \"da42804b-0fa3-43ee-9566-296c28b8052f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.794033 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-registration-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.797689 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13b910b7-69a1-438a-9ebe-d865adc99607-audit-dir\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.799841 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.803353 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-registry-certificates\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.804106 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 26 14:17:54 crc 
kubenswrapper[5037]: I1126 14:17:54.804579 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.807861 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.808316 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/af3320db-8161-492c-89ef-79aff52e898c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.808341 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-registry-tls\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.808702 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4-config\") pod \"console-operator-58897d9998-vq8zt\" (UID: \"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4\") " pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.809730 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.810346 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.810890 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/af3320db-8161-492c-89ef-79aff52e898c-service-ca-bundle\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.812471 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.812566 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-audit-policies\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.812672 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.813139 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4-trusted-ca\") pod \"console-operator-58897d9998-vq8zt\" (UID: \"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4\") " pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.813909 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-installation-pull-secrets\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.814140 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-console-config\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.814200 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-oauth-serving-cert\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.814784 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.815070 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/290387b2-4285-4359-bfdc-f89128f0c0a2-config\") pod \"route-controller-manager-6576b87f9c-glk27\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.815458 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-service-ca\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.816279 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-trusted-ca\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.816814 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0d344f6-3e27-4724-a70d-3b91cfb19576-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-5fx6g\" (UID: \"b0d344f6-3e27-4724-a70d-3b91cfb19576\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.817703 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0d344f6-3e27-4724-a70d-3b91cfb19576-config\") pod \"openshift-apiserver-operator-796bbdcf4f-5fx6g\" (UID: \"b0d344f6-3e27-4724-a70d-3b91cfb19576\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.818133 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/290387b2-4285-4359-bfdc-f89128f0c0a2-client-ca\") pod \"route-controller-manager-6576b87f9c-glk27\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.818275 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af3320db-8161-492c-89ef-79aff52e898c-config\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: E1126 14:17:54.818505 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:55.318458676 +0000 UTC m=+142.115228870 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.819434 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-ca-trust-extracted\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.820267 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4-serving-cert\") pod \"console-operator-58897d9998-vq8zt\" (UID: \"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4\") " pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.822315 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-trusted-ca-bundle\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.822396 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.823330 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7-metrics-tls\") pod \"dns-operator-744455d44c-4pktg\" (UID: \"9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7\") " pod="openshift-dns-operator/dns-operator-744455d44c-4pktg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.823496 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.823509 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.824355 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/290387b2-4285-4359-bfdc-f89128f0c0a2-serving-cert\") pod \"route-controller-manager-6576b87f9c-glk27\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:17:54 crc 
kubenswrapper[5037]: I1126 14:17:54.825247 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.825391 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/25030986-5796-4784-accd-c465c7c2daa3-console-oauth-config\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.826165 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af3320db-8161-492c-89ef-79aff52e898c-serving-cert\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.827272 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/25030986-5796-4784-accd-c465c7c2daa3-console-serving-cert\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.842587 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.849818 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sk94z"] Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.861053 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.867354 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7"] Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.886465 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.894881 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kncvx\" (UniqueName: \"kubernetes.io/projected/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-kube-api-access-kncvx\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.894921 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjk2b\" (UniqueName: \"kubernetes.io/projected/13abb7ac-3229-4f80-9132-d51f89ec896b-kube-api-access-rjk2b\") pod \"machine-config-server-m2pbv\" (UID: \"13abb7ac-3229-4f80-9132-d51f89ec896b\") " pod="openshift-machine-config-operator/machine-config-server-m2pbv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.894944 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: 
\"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-csi-data-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.894964 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp4xp\" (UniqueName: \"kubernetes.io/projected/77dc662c-6698-4035-b332-13bc5a0f3136-kube-api-access-fp4xp\") pod \"ingress-canary-85ns4\" (UID: \"77dc662c-6698-4035-b332-13bc5a0f3136\") " pod="openshift-ingress-canary/ingress-canary-85ns4" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.894981 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pp69n\" (UniqueName: \"kubernetes.io/projected/29912f3d-383e-4361-b882-48ab47cecb56-kube-api-access-pp69n\") pod \"ingress-operator-5b745b69d9-kj5z5\" (UID: \"29912f3d-383e-4361-b882-48ab47cecb56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895003 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-etcd-service-ca\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895020 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/19bb1706-6f02-4fe1-ac74-3425bc25376c-tmpfs\") pod \"packageserver-d55dfcdfc-jnbdg\" (UID: \"19bb1706-6f02-4fe1-ac74-3425bc25376c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895037 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/13abb7ac-3229-4f80-9132-d51f89ec896b-certs\") pod \"machine-config-server-m2pbv\" (UID: \"13abb7ac-3229-4f80-9132-d51f89ec896b\") " pod="openshift-machine-config-operator/machine-config-server-m2pbv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895051 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-etcd-client\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895085 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d-config-volume\") pod \"dns-default-9hccb\" (UID: \"9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d\") " pod="openshift-dns/dns-default-9hccb" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895102 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/73f71d36-826a-4890-8f3f-6f1f3f159d5e-default-certificate\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895122 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8b2886cf-264e-4ecd-b5f3-05c8974c7990-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-5kk6k\" (UID: \"8b2886cf-264e-4ecd-b5f3-05c8974c7990\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895142 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsgcw\" (UniqueName: \"kubernetes.io/projected/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-kube-api-access-xsgcw\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895172 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brt8r\" (UniqueName: \"kubernetes.io/projected/a2386288-e064-42f1-aac0-5866f0179542-kube-api-access-brt8r\") pod \"multus-admission-controller-857f4d67dd-7w8nb\" (UID: \"a2386288-e064-42f1-aac0-5866f0179542\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-7w8nb" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895187 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adb95af6-2754-4b77-94a8-c8df9d429a2c-config\") pod \"service-ca-operator-777779d784-8h7nh\" (UID: \"adb95af6-2754-4b77-94a8-c8df9d429a2c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895203 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-etcd-ca\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895235 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nccnc\" (UniqueName: \"kubernetes.io/projected/581b0050-27fb-4d72-9f11-75f4eb55a783-kube-api-access-nccnc\") pod \"openshift-controller-manager-operator-756b6f6bc6-6kv25\" (UID: \"581b0050-27fb-4d72-9f11-75f4eb55a783\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895252 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/29912f3d-383e-4361-b882-48ab47cecb56-metrics-tls\") pod \"ingress-operator-5b745b69d9-kj5z5\" (UID: \"29912f3d-383e-4361-b882-48ab47cecb56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895269 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e5c0da48-0f42-4508-be7c-da6125c90874-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-lt8gn\" (UID: \"e5c0da48-0f42-4508-be7c-da6125c90874\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895296 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/77dc662c-6698-4035-b332-13bc5a0f3136-cert\") pod \"ingress-canary-85ns4\" (UID: 
\"77dc662c-6698-4035-b332-13bc5a0f3136\") " pod="openshift-ingress-canary/ingress-canary-85ns4" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895324 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47ck9\" (UniqueName: \"kubernetes.io/projected/8b2f4cca-09b7-44dc-9458-298b0e3c8507-kube-api-access-47ck9\") pod \"marketplace-operator-79b997595-txjgw\" (UID: \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\") " pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895342 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b18bda87-1ef5-4511-ae6b-d9326a76aca2-auth-proxy-config\") pod \"machine-config-operator-74547568cd-nnqb4\" (UID: \"b18bda87-1ef5-4511-ae6b-d9326a76aca2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895369 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pj7zm\" (UniqueName: \"kubernetes.io/projected/da42804b-0fa3-43ee-9566-296c28b8052f-kube-api-access-pj7zm\") pod \"collect-profiles-29402775-ts75f\" (UID: \"da42804b-0fa3-43ee-9566-296c28b8052f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895384 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8042008-6a66-42ca-8f5e-76dd748cf0ba-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8wsp6\" (UID: \"d8042008-6a66-42ca-8f5e-76dd748cf0ba\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895400 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9npv\" (UniqueName: \"kubernetes.io/projected/eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff-kube-api-access-n9npv\") pod \"catalog-operator-68c6474976-p7j5p\" (UID: \"eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895421 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-config\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895440 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff-profile-collector-cert\") pod \"catalog-operator-68c6474976-p7j5p\" (UID: \"eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895457 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/edcc6478-c344-433f-9fef-c27760c464fc-signing-key\") pod \"service-ca-9c57cc56f-cwfd8\" (UID: \"edcc6478-c344-433f-9fef-c27760c464fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8" Nov 26 14:17:54 crc 
kubenswrapper[5037]: I1126 14:17:54.895472 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/581b0050-27fb-4d72-9f11-75f4eb55a783-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-6kv25\" (UID: \"581b0050-27fb-4d72-9f11-75f4eb55a783\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895487 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2jws\" (UniqueName: \"kubernetes.io/projected/c8ba13cb-4099-4fa8-b0fd-dba5852bd704-kube-api-access-t2jws\") pod \"olm-operator-6b444d44fb-jb467\" (UID: \"c8ba13cb-4099-4fa8-b0fd-dba5852bd704\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895508 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8b2f4cca-09b7-44dc-9458-298b0e3c8507-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-txjgw\" (UID: \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\") " pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895524 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/29912f3d-383e-4361-b882-48ab47cecb56-bound-sa-token\") pod \"ingress-operator-5b745b69d9-kj5z5\" (UID: \"29912f3d-383e-4361-b882-48ab47cecb56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895541 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-registration-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895558 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/da42804b-0fa3-43ee-9566-296c28b8052f-secret-volume\") pod \"collect-profiles-29402775-ts75f\" (UID: \"da42804b-0fa3-43ee-9566-296c28b8052f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895575 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzjlk\" (UniqueName: \"kubernetes.io/projected/adb95af6-2754-4b77-94a8-c8df9d429a2c-kube-api-access-dzjlk\") pod \"service-ca-operator-777779d784-8h7nh\" (UID: \"adb95af6-2754-4b77-94a8-c8df9d429a2c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895630 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c8ba13cb-4099-4fa8-b0fd-dba5852bd704-profile-collector-cert\") pod \"olm-operator-6b444d44fb-jb467\" (UID: \"c8ba13cb-4099-4fa8-b0fd-dba5852bd704\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895653 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"metrics-certs\" (UniqueName: \"kubernetes.io/secret/73f71d36-826a-4890-8f3f-6f1f3f159d5e-metrics-certs\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895669 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff-srv-cert\") pod \"catalog-operator-68c6474976-p7j5p\" (UID: \"eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895689 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895710 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b18bda87-1ef5-4511-ae6b-d9326a76aca2-images\") pod \"machine-config-operator-74547568cd-nnqb4\" (UID: \"b18bda87-1ef5-4511-ae6b-d9326a76aca2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895740 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/116c03c2-be5b-427a-8143-f40794e102a5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ktvdg\" (UID: \"116c03c2-be5b-427a-8143-f40794e102a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895755 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/19bb1706-6f02-4fe1-ac74-3425bc25376c-apiservice-cert\") pod \"packageserver-d55dfcdfc-jnbdg\" (UID: \"19bb1706-6f02-4fe1-ac74-3425bc25376c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895770 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-mountpoint-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895788 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b18bda87-1ef5-4511-ae6b-d9326a76aca2-proxy-tls\") pod \"machine-config-operator-74547568cd-nnqb4\" (UID: \"b18bda87-1ef5-4511-ae6b-d9326a76aca2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895810 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/81241e6e-dcc2-4509-8e40-fd330f57a15b-package-server-manager-serving-cert\") pod 
\"package-server-manager-789f6589d5-jm5zr\" (UID: \"81241e6e-dcc2-4509-8e40-fd330f57a15b\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895827 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ec1b52c7-15a6-4489-ba94-18d3621f4931-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-n4nhc\" (UID: \"ec1b52c7-15a6-4489-ba94-18d3621f4931\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895842 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/116c03c2-be5b-427a-8143-f40794e102a5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ktvdg\" (UID: \"116c03c2-be5b-427a-8143-f40794e102a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895857 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/edcc6478-c344-433f-9fef-c27760c464fc-signing-cabundle\") pod \"service-ca-9c57cc56f-cwfd8\" (UID: \"edcc6478-c344-433f-9fef-c27760c464fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895871 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5c0da48-0f42-4508-be7c-da6125c90874-config\") pod \"kube-apiserver-operator-766d6c64bb-lt8gn\" (UID: \"e5c0da48-0f42-4508-be7c-da6125c90874\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895891 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec1b52c7-15a6-4489-ba94-18d3621f4931-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-n4nhc\" (UID: \"ec1b52c7-15a6-4489-ba94-18d3621f4931\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895909 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgbmw\" (UniqueName: \"kubernetes.io/projected/73f71d36-826a-4890-8f3f-6f1f3f159d5e-kube-api-access-kgbmw\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895923 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adb95af6-2754-4b77-94a8-c8df9d429a2c-serving-cert\") pod \"service-ca-operator-777779d784-8h7nh\" (UID: \"adb95af6-2754-4b77-94a8-c8df9d429a2c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895944 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b58m5\" (UniqueName: \"kubernetes.io/projected/edcc6478-c344-433f-9fef-c27760c464fc-kube-api-access-b58m5\") pod \"service-ca-9c57cc56f-cwfd8\" (UID: 
\"edcc6478-c344-433f-9fef-c27760c464fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895962 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/13abb7ac-3229-4f80-9132-d51f89ec896b-node-bootstrap-token\") pod \"machine-config-server-m2pbv\" (UID: \"13abb7ac-3229-4f80-9132-d51f89ec896b\") " pod="openshift-machine-config-operator/machine-config-server-m2pbv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.895976 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-serving-cert\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896000 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a16c7c46-2c28-444d-8b7d-0ef797877620-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-g2jw7\" (UID: \"a16c7c46-2c28-444d-8b7d-0ef797877620\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896017 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvkzs\" (UniqueName: \"kubernetes.io/projected/8b2886cf-264e-4ecd-b5f3-05c8974c7990-kube-api-access-dvkzs\") pod \"machine-config-controller-84d6567774-5kk6k\" (UID: \"8b2886cf-264e-4ecd-b5f3-05c8974c7990\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896035 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wzv4\" (UniqueName: \"kubernetes.io/projected/19bb1706-6f02-4fe1-ac74-3425bc25376c-kube-api-access-8wzv4\") pod \"packageserver-d55dfcdfc-jnbdg\" (UID: \"19bb1706-6f02-4fe1-ac74-3425bc25376c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896049 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-plugins-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896070 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/581b0050-27fb-4d72-9f11-75f4eb55a783-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-6kv25\" (UID: \"581b0050-27fb-4d72-9f11-75f4eb55a783\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896086 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c8ba13cb-4099-4fa8-b0fd-dba5852bd704-srv-cert\") pod \"olm-operator-6b444d44fb-jb467\" (UID: \"c8ba13cb-4099-4fa8-b0fd-dba5852bd704\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" Nov 26 
14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896102 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvgk4\" (UniqueName: \"kubernetes.io/projected/ec1b52c7-15a6-4489-ba94-18d3621f4931-kube-api-access-qvgk4\") pod \"kube-storage-version-migrator-operator-b67b599dd-n4nhc\" (UID: \"ec1b52c7-15a6-4489-ba94-18d3621f4931\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896118 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a2386288-e064-42f1-aac0-5866f0179542-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-7w8nb\" (UID: \"a2386288-e064-42f1-aac0-5866f0179542\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-7w8nb" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896135 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wt8fp\" (UniqueName: \"kubernetes.io/projected/9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d-kube-api-access-wt8fp\") pod \"dns-default-9hccb\" (UID: \"9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d\") " pod="openshift-dns/dns-default-9hccb" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896150 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8b2886cf-264e-4ecd-b5f3-05c8974c7990-proxy-tls\") pod \"machine-config-controller-84d6567774-5kk6k\" (UID: \"8b2886cf-264e-4ecd-b5f3-05c8974c7990\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896167 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp6ns\" (UniqueName: \"kubernetes.io/projected/81241e6e-dcc2-4509-8e40-fd330f57a15b-kube-api-access-kp6ns\") pod \"package-server-manager-789f6589d5-jm5zr\" (UID: \"81241e6e-dcc2-4509-8e40-fd330f57a15b\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896182 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8b2f4cca-09b7-44dc-9458-298b0e3c8507-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-txjgw\" (UID: \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\") " pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896226 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/19bb1706-6f02-4fe1-ac74-3425bc25376c-webhook-cert\") pod \"packageserver-d55dfcdfc-jnbdg\" (UID: \"19bb1706-6f02-4fe1-ac74-3425bc25376c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896245 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rt8wv\" (UniqueName: \"kubernetes.io/projected/a16c7c46-2c28-444d-8b7d-0ef797877620-kube-api-access-rt8wv\") pod \"control-plane-machine-set-operator-78cbb6b69f-g2jw7\" (UID: \"a16c7c46-2c28-444d-8b7d-0ef797877620\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7" Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896264 
5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d-metrics-tls\") pod \"dns-default-9hccb\" (UID: \"9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d\") " pod="openshift-dns/dns-default-9hccb"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896681 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/29912f3d-383e-4361-b882-48ab47cecb56-trusted-ca\") pod \"ingress-operator-5b745b69d9-kj5z5\" (UID: \"29912f3d-383e-4361-b882-48ab47cecb56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896696 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/116c03c2-be5b-427a-8143-f40794e102a5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ktvdg\" (UID: \"116c03c2-be5b-427a-8143-f40794e102a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896713 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgp9n\" (UniqueName: \"kubernetes.io/projected/b18bda87-1ef5-4511-ae6b-d9326a76aca2-kube-api-access-zgp9n\") pod \"machine-config-operator-74547568cd-nnqb4\" (UID: \"b18bda87-1ef5-4511-ae6b-d9326a76aca2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896732 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e5c0da48-0f42-4508-be7c-da6125c90874-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-lt8gn\" (UID: \"e5c0da48-0f42-4508-be7c-da6125c90874\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896754 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/da42804b-0fa3-43ee-9566-296c28b8052f-config-volume\") pod \"collect-profiles-29402775-ts75f\" (UID: \"da42804b-0fa3-43ee-9566-296c28b8052f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896781 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/73f71d36-826a-4890-8f3f-6f1f3f159d5e-service-ca-bundle\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896796 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-socket-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896812 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8042008-6a66-42ca-8f5e-76dd748cf0ba-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8wsp6\" (UID: \"d8042008-6a66-42ca-8f5e-76dd748cf0ba\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896829 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/73f71d36-826a-4890-8f3f-6f1f3f159d5e-stats-auth\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896845 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbzl7\" (UniqueName: \"kubernetes.io/projected/b0ea6163-7bb2-458d-bd2b-5ec1b5d4960c-kube-api-access-gbzl7\") pod \"migrator-59844c95c7-7wr5z\" (UID: \"b0ea6163-7bb2-458d-bd2b-5ec1b5d4960c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-7wr5z"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.896862 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8042008-6a66-42ca-8f5e-76dd748cf0ba-config\") pod \"kube-controller-manager-operator-78b949d7b-8wsp6\" (UID: \"d8042008-6a66-42ca-8f5e-76dd748cf0ba\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.897469 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8042008-6a66-42ca-8f5e-76dd748cf0ba-config\") pod \"kube-controller-manager-operator-78b949d7b-8wsp6\" (UID: \"d8042008-6a66-42ca-8f5e-76dd748cf0ba\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.897498 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-csi-data-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.898393 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-etcd-service-ca\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.899136 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-config\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt"
Nov 26 14:17:54 crc kubenswrapper[5037]: E1126 14:17:54.899412 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:55.399384462 +0000 UTC m=+142.196154676 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.900065 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8b2886cf-264e-4ecd-b5f3-05c8974c7990-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-5kk6k\" (UID: \"8b2886cf-264e-4ecd-b5f3-05c8974c7990\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.900251 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/19bb1706-6f02-4fe1-ac74-3425bc25376c-tmpfs\") pod \"packageserver-d55dfcdfc-jnbdg\" (UID: \"19bb1706-6f02-4fe1-ac74-3425bc25376c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.901208 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5c0da48-0f42-4508-be7c-da6125c90874-config\") pod \"kube-apiserver-operator-766d6c64bb-lt8gn\" (UID: \"e5c0da48-0f42-4508-be7c-da6125c90874\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.901780 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec1b52c7-15a6-4489-ba94-18d3621f4931-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-n4nhc\" (UID: \"ec1b52c7-15a6-4489-ba94-18d3621f4931\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.902506 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-plugins-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.902954 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-mountpoint-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.903418 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-registration-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.903517 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-socket-dir\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.903858 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.905075 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-etcd-client\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.905593 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b18bda87-1ef5-4511-ae6b-d9326a76aca2-images\") pod \"machine-config-operator-74547568cd-nnqb4\" (UID: \"b18bda87-1ef5-4511-ae6b-d9326a76aca2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.905667 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8b2886cf-264e-4ecd-b5f3-05c8974c7990-proxy-tls\") pod \"machine-config-controller-84d6567774-5kk6k\" (UID: \"8b2886cf-264e-4ecd-b5f3-05c8974c7990\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.906341 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d-config-volume\") pod \"dns-default-9hccb\" (UID: \"9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d\") " pod="openshift-dns/dns-default-9hccb"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.907229 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c8ba13cb-4099-4fa8-b0fd-dba5852bd704-profile-collector-cert\") pod \"olm-operator-6b444d44fb-jb467\" (UID: \"c8ba13cb-4099-4fa8-b0fd-dba5852bd704\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.907640 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a2386288-e064-42f1-aac0-5866f0179542-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-7w8nb\" (UID: \"a2386288-e064-42f1-aac0-5866f0179542\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-7w8nb"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.908054 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/da42804b-0fa3-43ee-9566-296c28b8052f-config-volume\") pod \"collect-profiles-29402775-ts75f\" (UID: \"da42804b-0fa3-43ee-9566-296c28b8052f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.908208 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff-profile-collector-cert\") pod \"catalog-operator-68c6474976-p7j5p\" (UID: \"eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.908647 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b18bda87-1ef5-4511-ae6b-d9326a76aca2-proxy-tls\") pod \"machine-config-operator-74547568cd-nnqb4\" (UID: \"b18bda87-1ef5-4511-ae6b-d9326a76aca2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.909002 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d-metrics-tls\") pod \"dns-default-9hccb\" (UID: \"9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d\") " pod="openshift-dns/dns-default-9hccb"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.909522 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/29912f3d-383e-4361-b882-48ab47cecb56-trusted-ca\") pod \"ingress-operator-5b745b69d9-kj5z5\" (UID: \"29912f3d-383e-4361-b882-48ab47cecb56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.910140 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/73f71d36-826a-4890-8f3f-6f1f3f159d5e-service-ca-bundle\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.910864 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/19bb1706-6f02-4fe1-ac74-3425bc25376c-apiservice-cert\") pod \"packageserver-d55dfcdfc-jnbdg\" (UID: \"19bb1706-6f02-4fe1-ac74-3425bc25376c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.910937 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8b2f4cca-09b7-44dc-9458-298b0e3c8507-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-txjgw\" (UID: \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\") " pod="openshift-marketplace/marketplace-operator-79b997595-txjgw"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.910799 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a16c7c46-2c28-444d-8b7d-0ef797877620-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-g2jw7\" (UID: \"a16c7c46-2c28-444d-8b7d-0ef797877620\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.911715 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-etcd-ca\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.913950 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/116c03c2-be5b-427a-8143-f40794e102a5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ktvdg\" (UID: \"116c03c2-be5b-427a-8143-f40794e102a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.920633 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b18bda87-1ef5-4511-ae6b-d9326a76aca2-auth-proxy-config\") pod \"machine-config-operator-74547568cd-nnqb4\" (UID: \"b18bda87-1ef5-4511-ae6b-d9326a76aca2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.922112 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/116c03c2-be5b-427a-8143-f40794e102a5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ktvdg\" (UID: \"116c03c2-be5b-427a-8143-f40794e102a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.922362 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8b2f4cca-09b7-44dc-9458-298b0e3c8507-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-txjgw\" (UID: \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\") " pod="openshift-marketplace/marketplace-operator-79b997595-txjgw"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.922446 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e5c0da48-0f42-4508-be7c-da6125c90874-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-lt8gn\" (UID: \"e5c0da48-0f42-4508-be7c-da6125c90874\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.922743 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.922807 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8042008-6a66-42ca-8f5e-76dd748cf0ba-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8wsp6\" (UID: \"d8042008-6a66-42ca-8f5e-76dd748cf0ba\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.922765 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ec1b52c7-15a6-4489-ba94-18d3621f4931-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-n4nhc\" (UID: \"ec1b52c7-15a6-4489-ba94-18d3621f4931\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.923751 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/19bb1706-6f02-4fe1-ac74-3425bc25376c-webhook-cert\") pod \"packageserver-d55dfcdfc-jnbdg\" (UID: \"19bb1706-6f02-4fe1-ac74-3425bc25376c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.924134 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/da42804b-0fa3-43ee-9566-296c28b8052f-secret-volume\") pod \"collect-profiles-29402775-ts75f\" (UID: \"da42804b-0fa3-43ee-9566-296c28b8052f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.925925 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/73f71d36-826a-4890-8f3f-6f1f3f159d5e-default-certificate\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.925953 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/81241e6e-dcc2-4509-8e40-fd330f57a15b-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-jm5zr\" (UID: \"81241e6e-dcc2-4509-8e40-fd330f57a15b\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.926517 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/29912f3d-383e-4361-b882-48ab47cecb56-metrics-tls\") pod \"ingress-operator-5b745b69d9-kj5z5\" (UID: \"29912f3d-383e-4361-b882-48ab47cecb56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.927581 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-serving-cert\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.930309 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/73f71d36-826a-4890-8f3f-6f1f3f159d5e-stats-auth\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.933621 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/73f71d36-826a-4890-8f3f-6f1f3f159d5e-metrics-certs\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.939373 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-d5k2g"]
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.941598 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 26 14:17:54 crc kubenswrapper[5037]: W1126 14:17:54.953795 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd732fc51_cc4a_49a4_b296_2c40ddc33395.slice/crio-1846c5405af00acd19ee9034ae2401dfbcc90aab65980e4e6f262d0bfeea10c0 WatchSource:0}: Error finding container 1846c5405af00acd19ee9034ae2401dfbcc90aab65980e4e6f262d0bfeea10c0: Status 404 returned error can't find the container with id 1846c5405af00acd19ee9034ae2401dfbcc90aab65980e4e6f262d0bfeea10c0
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.962405 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.981492 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.998533 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:54 crc kubenswrapper[5037]: E1126 14:17:54.998767 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:55.498733466 +0000 UTC m=+142.295503650 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:54 crc kubenswrapper[5037]: I1126 14:17:54.998936 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:54 crc kubenswrapper[5037]: E1126 14:17:54.999325 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:55.499317792 +0000 UTC m=+142.296087976 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.001918 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.013145 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adb95af6-2754-4b77-94a8-c8df9d429a2c-config\") pod \"service-ca-operator-777779d784-8h7nh\" (UID: \"adb95af6-2754-4b77-94a8-c8df9d429a2c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.021153 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.033442 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adb95af6-2754-4b77-94a8-c8df9d429a2c-serving-cert\") pod \"service-ca-operator-777779d784-8h7nh\" (UID: \"adb95af6-2754-4b77-94a8-c8df9d429a2c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.042195 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.062195 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.081149 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.086954 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c8ba13cb-4099-4fa8-b0fd-dba5852bd704-srv-cert\") pod \"olm-operator-6b444d44fb-jb467\" (UID: \"c8ba13cb-4099-4fa8-b0fd-dba5852bd704\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.102016 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.102083 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.102177 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:55.602153124 +0000 UTC m=+142.398923308 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.103103 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.103486 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:55.603477487 +0000 UTC m=+142.400247671 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.115346 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/581b0050-27fb-4d72-9f11-75f4eb55a783-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-6kv25\" (UID: \"581b0050-27fb-4d72-9f11-75f4eb55a783\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.121060 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.141597 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.144048 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/581b0050-27fb-4d72-9f11-75f4eb55a783-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-6kv25\" (UID: \"581b0050-27fb-4d72-9f11-75f4eb55a783\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.161420 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.182603 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.201391 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.204478 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.204658 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:55.704626557 +0000 UTC m=+142.501396741 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.204814 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.205234 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:55.705220653 +0000 UTC m=+142.501990837 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.220872 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.223826 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/edcc6478-c344-433f-9fef-c27760c464fc-signing-cabundle\") pod \"service-ca-9c57cc56f-cwfd8\" (UID: \"edcc6478-c344-433f-9fef-c27760c464fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.242270 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.261531 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.267710 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/edcc6478-c344-433f-9fef-c27760c464fc-signing-key\") pod \"service-ca-9c57cc56f-cwfd8\" (UID: \"edcc6478-c344-433f-9fef-c27760c464fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.281445 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.300564 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.306232 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.306450 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:55.806413053 +0000 UTC m=+142.603183247 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.306444 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff-srv-cert\") pod \"catalog-operator-68c6474976-p7j5p\" (UID: \"eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.306783 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.307473 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:55.80746029 +0000 UTC m=+142.604230474 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.321475 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.341619 5037 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.362454 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.382120 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.402501 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.408958 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.409491 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:55.90944189 +0000 UTC m=+142.706212074 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.410025 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.410643 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:55.910623081 +0000 UTC m=+142.707393275 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.422699 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.438022 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/77dc662c-6698-4035-b332-13bc5a0f3136-cert\") pod \"ingress-canary-85ns4\" (UID: \"77dc662c-6698-4035-b332-13bc5a0f3136\") " pod="openshift-ingress-canary/ingress-canary-85ns4"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.442303 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.477077 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmmt7\" (UniqueName: \"kubernetes.io/projected/1e46b121-f4d1-402d-8af2-425b4af276dd-kube-api-access-tmmt7\") pod \"openshift-config-operator-7777fb866f-fpm9x\" (UID: \"1e46b121-f4d1-402d-8af2-425b4af276dd\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.497566 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1f3de401-4fc0-48c3-9ecc-0a994b8d5f72-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-h42qk\" (UID: \"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.511086 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.511313 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:56.011243007 +0000 UTC m=+142.808013191 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.511813 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.512420 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:56.012390927 +0000 UTC m=+142.809161301 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.515265 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtcl6\" (UniqueName: \"kubernetes.io/projected/0f85943c-8848-42b9-a4e8-43f2689ba52f-kube-api-access-gtcl6\") pod \"machine-approver-56656f9798-bl76p\" (UID: \"0f85943c-8848-42b9-a4e8-43f2689ba52f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.534735 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qn8dp\" (UniqueName: \"kubernetes.io/projected/1f3de401-4fc0-48c3-9ecc-0a994b8d5f72-kube-api-access-qn8dp\") pod \"cluster-image-registry-operator-dc59b4c8b-h42qk\" (UID: \"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.557423 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tzdz\" (UniqueName: \"kubernetes.io/projected/c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7-kube-api-access-7tzdz\") pod \"machine-api-operator-5694c8668f-l7mvc\" (UID: \"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.578898 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtn7l\" (UniqueName: \"kubernetes.io/projected/d8d223ba-d8fb-48bd-9654-4e8146097407-kube-api-access-wtn7l\") pod \"cluster-samples-operator-665b6dd947-l82xx\" (UID: \"d8d223ba-d8fb-48bd-9654-4e8146097407\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.580961 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.599962 5037 request.go:700] Waited for 1.937809364s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dnode-bootstrapper-token&limit=500&resourceVersion=0
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.602203 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.609522 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/13abb7ac-3229-4f80-9132-d51f89ec896b-node-bootstrap-token\") pod \"machine-config-server-m2pbv\" (UID: \"13abb7ac-3229-4f80-9132-d51f89ec896b\") " pod="openshift-machine-config-operator/machine-config-server-m2pbv"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.623546 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.624878 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:56.124847183 +0000 UTC m=+142.921617357 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.625363 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.635071 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/13abb7ac-3229-4f80-9132-d51f89ec896b-certs\") pod \"machine-config-server-m2pbv\" (UID: \"13abb7ac-3229-4f80-9132-d51f89ec896b\") " pod="openshift-machine-config-operator/machine-config-server-m2pbv"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.646844 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.680375 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7x8w\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-kube-api-access-w7x8w\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.707686 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lh8bd\" (UniqueName: \"kubernetes.io/projected/e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4-kube-api-access-lh8bd\") pod \"console-operator-58897d9998-vq8zt\" (UID: \"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4\") " pod="openshift-console-operator/console-operator-58897d9998-vq8zt"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.709160 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.720446 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.721744 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqrld\" (UniqueName: \"kubernetes.io/projected/25030986-5796-4784-accd-c465c7c2daa3-kube-api-access-gqrld\") pod \"console-f9d7485db-qfdqh\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") " pod="openshift-console/console-f9d7485db-qfdqh"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.726626 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.727377 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:56.227360359 +0000 UTC m=+143.024130543 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.729630 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.737101 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.741453 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swkxq\" (UniqueName: \"kubernetes.io/projected/9dfb8a84-f022-4823-b563-5800b665b32f-kube-api-access-swkxq\") pod \"downloads-7954f5f757-vwp8j\" (UID: \"9dfb8a84-f022-4823-b563-5800b665b32f\") " pod="openshift-console/downloads-7954f5f757-vwp8j"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.743129 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-vq8zt"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.761435 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cl2f\" (UniqueName: \"kubernetes.io/projected/9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7-kube-api-access-7cl2f\") pod \"dns-operator-744455d44c-4pktg\" (UID: \"9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7\") " pod="openshift-dns-operator/dns-operator-744455d44c-4pktg"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.780928 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsxzw\" (UniqueName: \"kubernetes.io/projected/13b910b7-69a1-438a-9ebe-d865adc99607-kube-api-access-rsxzw\") pod \"oauth-openshift-558db77b4-c252f\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " pod="openshift-authentication/oauth-openshift-558db77b4-c252f"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.808595 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwrf6\" (UniqueName: \"kubernetes.io/projected/290387b2-4285-4359-bfdc-f89128f0c0a2-kube-api-access-dwrf6\") pod \"route-controller-manager-6576b87f9c-glk27\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.820811 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" event={"ID":"ce2d6221-7202-44cf-a85e-dec10e764129","Type":"ContainerStarted","Data":"f779eddf583ad6d58305391dcd83ea594be424fd15fcb3078262ca8f6dddd585"}
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.820865 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" event={"ID":"ce2d6221-7202-44cf-a85e-dec10e764129","Type":"ContainerStarted","Data":"18a751b202ab17be8c52ff7cd6c17daf17d8ce0ddac15fcefd0f113baa56eedf"}
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.821824 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-bound-sa-token\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.821879 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.823134 5037 generic.go:334] "Generic (PLEG): container finished" podID="4471bb32-29a0-435a-b36b-94ab5766b1fb" containerID="e66ad7c0895570fe1406f7d51ecd30133cfe46df4bd51fbc4c4ec4782d514b43" exitCode=0
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.823177 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" event={"ID":"4471bb32-29a0-435a-b36b-94ab5766b1fb","Type":"ContainerDied","Data":"e66ad7c0895570fe1406f7d51ecd30133cfe46df4bd51fbc4c4ec4782d514b43"}
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.823192 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" event={"ID":"4471bb32-29a0-435a-b36b-94ab5766b1fb","Type":"ContainerStarted","Data":"5e29862b9b98d455f2952bb7637be07584f109a00a799432b0c9541995754940"}
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.828264 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.829895 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" event={"ID":"0f85943c-8848-42b9-a4e8-43f2689ba52f","Type":"ContainerStarted","Data":"8f8143a3e4ae484b44ca805a4446e5f5aa26432d277bfe3a79e6a3e97fcae047"}
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.835788 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:56.335742462 +0000 UTC m=+143.132512646 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.836679 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.837049 5037 generic.go:334] "Generic (PLEG): container finished" podID="d732fc51-cc4a-49a4-b296-2c40ddc33395" containerID="4ffb95892e5fe5b278c1f604d8213d81395d78b9601027e47071429e7af64d53" exitCode=0
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.837080 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" event={"ID":"d732fc51-cc4a-49a4-b296-2c40ddc33395","Type":"ContainerDied","Data":"4ffb95892e5fe5b278c1f604d8213d81395d78b9601027e47071429e7af64d53"}
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.837094 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" event={"ID":"d732fc51-cc4a-49a4-b296-2c40ddc33395","Type":"ContainerStarted","Data":"1846c5405af00acd19ee9034ae2401dfbcc90aab65980e4e6f262d0bfeea10c0"}
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.843037 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lds9f\" (UniqueName: \"kubernetes.io/projected/b0d344f6-3e27-4724-a70d-3b91cfb19576-kube-api-access-lds9f\") pod \"openshift-apiserver-operator-796bbdcf4f-5fx6g\" (UID: \"b0d344f6-3e27-4724-a70d-3b91cfb19576\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.849121 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.855206 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-l7mvc"]
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.864439 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9f9v\" (UniqueName: \"kubernetes.io/projected/af3320db-8161-492c-89ef-79aff52e898c-kube-api-access-x9f9v\") pod \"authentication-operator-69f744f599-82wc7\" (UID: \"af3320db-8161-492c-89ef-79aff52e898c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.875258 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-c252f"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.885097 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kncvx\" (UniqueName: \"kubernetes.io/projected/fabb60f0-0ad0-4cb0-9a64-81ecde15afff-kube-api-access-kncvx\") pod \"etcd-operator-b45778765-9z9lt\" (UID: \"fabb60f0-0ad0-4cb0-9a64-81ecde15afff\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.902839 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjk2b\" (UniqueName: \"kubernetes.io/projected/13abb7ac-3229-4f80-9132-d51f89ec896b-kube-api-access-rjk2b\") pod \"machine-config-server-m2pbv\" (UID: \"13abb7ac-3229-4f80-9132-d51f89ec896b\") " pod="openshift-machine-config-operator/machine-config-server-m2pbv"
Nov 26 14:17:55 crc kubenswrapper[5037]: W1126 14:17:55.917471 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc324fe4d_b0c6_4c0a_9dd7_10aa517dcce7.slice/crio-818f11d44a740f91c480a26c4813c8417ca9284ac14633dff00bd310de2b29b3 WatchSource:0}: Error finding container 818f11d44a740f91c480a26c4813c8417ca9284ac14633dff00bd310de2b29b3: Status 404 returned error can't find the container with id 818f11d44a740f91c480a26c4813c8417ca9284ac14633dff00bd310de2b29b3
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.921540 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.923776 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp4xp\" (UniqueName: \"kubernetes.io/projected/77dc662c-6698-4035-b332-13bc5a0f3136-kube-api-access-fp4xp\") pod \"ingress-canary-85ns4\" (UID: \"77dc662c-6698-4035-b332-13bc5a0f3136\") " pod="openshift-ingress-canary/ingress-canary-85ns4"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.932680 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-vwp8j"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.938246 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:55 crc kubenswrapper[5037]: E1126 14:17:55.940907 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:56.440892463 +0000 UTC m=+143.237662637 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.954193 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pp69n\" (UniqueName: \"kubernetes.io/projected/29912f3d-383e-4361-b882-48ab47cecb56-kube-api-access-pp69n\") pod \"ingress-operator-5b745b69d9-kj5z5\" (UID: \"29912f3d-383e-4361-b882-48ab47cecb56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.956745 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-qfdqh"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.967227 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pj7zm\" (UniqueName: \"kubernetes.io/projected/da42804b-0fa3-43ee-9566-296c28b8052f-kube-api-access-pj7zm\") pod \"collect-profiles-29402775-ts75f\" (UID: \"da42804b-0fa3-43ee-9566-296c28b8052f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.982902 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx"]
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.983986 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rt8wv\" (UniqueName: \"kubernetes.io/projected/a16c7c46-2c28-444d-8b7d-0ef797877620-kube-api-access-rt8wv\") pod \"control-plane-machine-set-operator-78cbb6b69f-g2jw7\" (UID: \"a16c7c46-2c28-444d-8b7d-0ef797877620\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.985379 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-85ns4"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.990683 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-m2pbv"
Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.998621 5037 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" Nov 26 14:17:55 crc kubenswrapper[5037]: I1126 14:17:55.998993 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wt8fp\" (UniqueName: \"kubernetes.io/projected/9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d-kube-api-access-wt8fp\") pod \"dns-default-9hccb\" (UID: \"9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d\") " pod="openshift-dns/dns-default-9hccb" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.017930 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsgcw\" (UniqueName: \"kubernetes.io/projected/f9e69f1b-47a1-4ea5-9d69-b79bf401810a-kube-api-access-xsgcw\") pod \"csi-hostpathplugin-zz4kv\" (UID: \"f9e69f1b-47a1-4ea5-9d69-b79bf401810a\") " pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.039276 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:56 crc kubenswrapper[5037]: E1126 14:17:56.039892 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:56.539872928 +0000 UTC m=+143.336643112 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.070880 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-4pktg" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.083626 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.089363 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brt8r\" (UniqueName: \"kubernetes.io/projected/a2386288-e064-42f1-aac0-5866f0179542-kube-api-access-brt8r\") pod \"multus-admission-controller-857f4d67dd-7w8nb\" (UID: \"a2386288-e064-42f1-aac0-5866f0179542\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-7w8nb" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.093054 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzjlk\" (UniqueName: \"kubernetes.io/projected/adb95af6-2754-4b77-94a8-c8df9d429a2c-kube-api-access-dzjlk\") pod \"service-ca-operator-777779d784-8h7nh\" (UID: \"adb95af6-2754-4b77-94a8-c8df9d429a2c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.112231 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-7w8nb" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.114769 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nccnc\" (UniqueName: \"kubernetes.io/projected/581b0050-27fb-4d72-9f11-75f4eb55a783-kube-api-access-nccnc\") pod \"openshift-controller-manager-operator-756b6f6bc6-6kv25\" (UID: \"581b0050-27fb-4d72-9f11-75f4eb55a783\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.123567 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wzv4\" (UniqueName: \"kubernetes.io/projected/19bb1706-6f02-4fe1-ac74-3425bc25376c-kube-api-access-8wzv4\") pod \"packageserver-d55dfcdfc-jnbdg\" (UID: \"19bb1706-6f02-4fe1-ac74-3425bc25376c\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.138083 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgbmw\" (UniqueName: \"kubernetes.io/projected/73f71d36-826a-4890-8f3f-6f1f3f159d5e-kube-api-access-kgbmw\") pod \"router-default-5444994796-gncr9\" (UID: \"73f71d36-826a-4890-8f3f-6f1f3f159d5e\") " pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.139813 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.140998 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:56 crc kubenswrapper[5037]: E1126 14:17:56.141548 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:56.641532921 +0000 UTC m=+143.438303095 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.160956 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/116c03c2-be5b-427a-8143-f40794e102a5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ktvdg\" (UID: \"116c03c2-be5b-427a-8143-f40794e102a5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.161296 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.179925 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgp9n\" (UniqueName: \"kubernetes.io/projected/b18bda87-1ef5-4511-ae6b-d9326a76aca2-kube-api-access-zgp9n\") pod \"machine-config-operator-74547568cd-nnqb4\" (UID: \"b18bda87-1ef5-4511-ae6b-d9326a76aca2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.198368 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b58m5\" (UniqueName: \"kubernetes.io/projected/edcc6478-c344-433f-9fef-c27760c464fc-kube-api-access-b58m5\") pod \"service-ca-9c57cc56f-cwfd8\" (UID: \"edcc6478-c344-433f-9fef-c27760c464fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.198893 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-9hccb" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.201898 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e5c0da48-0f42-4508-be7c-da6125c90874-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-lt8gn\" (UID: \"e5c0da48-0f42-4508-be7c-da6125c90874\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.201925 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.219639 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvgk4\" (UniqueName: \"kubernetes.io/projected/ec1b52c7-15a6-4489-ba94-18d3621f4931-kube-api-access-qvgk4\") pod \"kube-storage-version-migrator-operator-b67b599dd-n4nhc\" (UID: \"ec1b52c7-15a6-4489-ba94-18d3621f4931\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.219880 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.238232 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.242579 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:56 crc kubenswrapper[5037]: E1126 14:17:56.243253 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:56.743222315 +0000 UTC m=+143.539992509 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.244060 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbzl7\" (UniqueName: \"kubernetes.io/projected/b0ea6163-7bb2-458d-bd2b-5ec1b5d4960c-kube-api-access-gbzl7\") pod \"migrator-59844c95c7-7wr5z\" (UID: \"b0ea6163-7bb2-458d-bd2b-5ec1b5d4960c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-7wr5z" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.247563 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.259805 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:56 crc kubenswrapper[5037]: E1126 14:17:56.261349 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:56.761330885 +0000 UTC m=+143.558101069 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.265411 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2jws\" (UniqueName: \"kubernetes.io/projected/c8ba13cb-4099-4fa8-b0fd-dba5852bd704-kube-api-access-t2jws\") pod \"olm-operator-6b444d44fb-jb467\" (UID: \"c8ba13cb-4099-4fa8-b0fd-dba5852bd704\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.273561 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x"] Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.273760 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-vq8zt"] Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.278164 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" Nov 26 14:17:56 crc kubenswrapper[5037]: W1126 14:17:56.283497 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13abb7ac_3229_4f80_9132_d51f89ec896b.slice/crio-ab8238838a963fd8e685d4ae912c811e6bcb1d8494dbeb7b729a863b29f75131 WatchSource:0}: Error finding container ab8238838a963fd8e685d4ae912c811e6bcb1d8494dbeb7b729a863b29f75131: Status 404 returned error can't find the container with id ab8238838a963fd8e685d4ae912c811e6bcb1d8494dbeb7b729a863b29f75131 Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.288195 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47ck9\" (UniqueName: \"kubernetes.io/projected/8b2f4cca-09b7-44dc-9458-298b0e3c8507-kube-api-access-47ck9\") pod \"marketplace-operator-79b997595-txjgw\" (UID: \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\") " pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.301631 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9npv\" (UniqueName: \"kubernetes.io/projected/eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff-kube-api-access-n9npv\") pod \"catalog-operator-68c6474976-p7j5p\" (UID: \"eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.321669 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp6ns\" (UniqueName: \"kubernetes.io/projected/81241e6e-dcc2-4509-8e40-fd330f57a15b-kube-api-access-kp6ns\") pod \"package-server-manager-789f6589d5-jm5zr\" (UID: \"81241e6e-dcc2-4509-8e40-fd330f57a15b\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.333988 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk"] Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.365704 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:56 crc kubenswrapper[5037]: E1126 14:17:56.367271 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:56.867227226 +0000 UTC m=+143.663997550 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.368321 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8042008-6a66-42ca-8f5e-76dd748cf0ba-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8wsp6\" (UID: \"d8042008-6a66-42ca-8f5e-76dd748cf0ba\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.383736 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvkzs\" (UniqueName: \"kubernetes.io/projected/8b2886cf-264e-4ecd-b5f3-05c8974c7990-kube-api-access-dvkzs\") pod \"machine-config-controller-84d6567774-5kk6k\" (UID: \"8b2886cf-264e-4ecd-b5f3-05c8974c7990\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.391096 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.398778 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/29912f3d-383e-4361-b882-48ab47cecb56-bound-sa-token\") pod \"ingress-operator-5b745b69d9-kj5z5\" (UID: \"29912f3d-383e-4361-b882-48ab47cecb56\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.399433 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.405062 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.419506 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.427075 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.436483 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.449353 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.454582 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.468705 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:56 crc kubenswrapper[5037]: E1126 14:17:56.469164 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:56.969141594 +0000 UTC m=+143.765911778 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.470477 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.486771 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-7wr5z" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.513604 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.528321 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.568328 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.569539 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:56 crc kubenswrapper[5037]: E1126 14:17:56.570552 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:57.07050126 +0000 UTC m=+143.867271624 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.638948 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27"] Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.668805 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g"] Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.671729 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:56 crc kubenswrapper[5037]: E1126 14:17:56.672126 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:57.172110781 +0000 UTC m=+143.968880965 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.682752 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-c252f"] Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.772325 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:56 crc kubenswrapper[5037]: E1126 14:17:56.772508 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:57.272478941 +0000 UTC m=+144.069249125 (durationBeforeRetry 500ms). 
Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.773708 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:56 crc kubenswrapper[5037]: E1126 14:17:56.775549 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:57.275534639 +0000 UTC m=+144.072304823 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.864463 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" event={"ID":"1e46b121-f4d1-402d-8af2-425b4af276dd","Type":"ContainerStarted","Data":"d6658f78b4bc3af637fa27641265951ced11764acd385ffe50cb3c6b5196dff6"}
Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.875197 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:56 crc kubenswrapper[5037]: E1126 14:17:56.875933 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:57.375721445 +0000 UTC m=+144.172491629 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.882572 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" event={"ID":"0f85943c-8848-42b9-a4e8-43f2689ba52f","Type":"ContainerStarted","Data":"d2719cf627a0221726b82bf0cad4e651fd603d09108ffc1718f35f35b6bf37a8"}
Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.904947 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" event={"ID":"4471bb32-29a0-435a-b36b-94ab5766b1fb","Type":"ContainerStarted","Data":"2338df7616b6b40e1f939c5c73e52def73c53950a8958fa91f15145b9c20f25b"}
Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.930102 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-gncr9" event={"ID":"73f71d36-826a-4890-8f3f-6f1f3f159d5e","Type":"ContainerStarted","Data":"dc9e919eb8dcd5221b4ab5c51d21046e25940987ced5bdf64b45256eb0dd4e10"}
Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.969046 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-m2pbv" event={"ID":"13abb7ac-3229-4f80-9132-d51f89ec896b","Type":"ContainerStarted","Data":"ab8238838a963fd8e685d4ae912c811e6bcb1d8494dbeb7b729a863b29f75131"}
Nov 26 14:17:56 crc kubenswrapper[5037]: I1126 14:17:56.988400 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:56.989838 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:57.489819913 +0000 UTC m=+144.286590097 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.001339 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" event={"ID":"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7","Type":"ContainerStarted","Data":"cd2db91ed29b7e95fdcf25ce0332e39ac0c5da83d26907213cfbea681d8860b3"}
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.001385 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" event={"ID":"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7","Type":"ContainerStarted","Data":"818f11d44a740f91c480a26c4813c8417ca9284ac14633dff00bd310de2b29b3"}
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.004716 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-vq8zt" event={"ID":"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4","Type":"ContainerStarted","Data":"1c523660b5cbe81f1dbd262f3548831c9a9f6e81e796907734832184ebd1723f"}
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.009880 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" event={"ID":"d732fc51-cc4a-49a4-b296-2c40ddc33395","Type":"ContainerStarted","Data":"40679f2b97c5dbd4aad78ea6b8f8fee2f3e32b0b6ced14569c0a3152731f93e3"}
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.021763 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx" event={"ID":"d8d223ba-d8fb-48bd-9654-4e8146097407","Type":"ContainerStarted","Data":"238be35aaf56a5886cef847dd42c618604b01b79e259146681b3c0b79f84e1eb"}
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.030896 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk" event={"ID":"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72","Type":"ContainerStarted","Data":"9bfac2113014b42d63b8843afbb1c1e53491589487d53154056724cb97bbcdc7"}
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.096081 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.097421 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:57.597396486 +0000 UTC m=+144.394166670 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.097779 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.099506 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:57.59949683 +0000 UTC m=+144.396267004 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.199127 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.199342 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:57.699280855 +0000 UTC m=+144.496051039 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.199580 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.200042 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:57.700034164 +0000 UTC m=+144.496804348 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.306785 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.306961 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:57.8069339 +0000 UTC m=+144.603704084 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.307156 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.307587 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:57.807576816 +0000 UTC m=+144.604347000 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.410473 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.411385 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:57.911363283 +0000 UTC m=+144.708133467 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.514122 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.514609 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.014592066 +0000 UTC m=+144.811362250 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.583990 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" podStartSLOduration=122.583967019 podStartE2EDuration="2m2.583967019s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:57.550600591 +0000 UTC m=+144.347370775" watchObservedRunningTime="2025-11-26 14:17:57.583967019 +0000 UTC m=+144.380737203"
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.615026 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.615219 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.115182372 +0000 UTC m=+144.911952556 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.615365 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.615987 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.115976711 +0000 UTC m=+144.912746895 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.649536 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-vwp8j"]
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.716673 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.716886 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.216854204 +0000 UTC m=+145.013624388 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.717068 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.720247 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.22020341 +0000 UTC m=+145.016973594 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.818034 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.818452 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.318420695 +0000 UTC m=+145.115190879 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.818757 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.819353 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.319333129 +0000 UTC m=+145.116103303 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.920013 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:17:57 crc kubenswrapper[5037]: E1126 14:17:57.920675 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.420641942 +0000 UTC m=+145.217412126 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:17:57 crc kubenswrapper[5037]: I1126 14:17:57.972063 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" podStartSLOduration=122.972039528 podStartE2EDuration="2m2.972039528s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:57.971116565 +0000 UTC m=+144.767886749" watchObservedRunningTime="2025-11-26 14:17:57.972039528 +0000 UTC m=+144.768809702"
Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.043585 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:17:58 crc kubenswrapper[5037]: E1126 14:17:58.044103 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.544070888 +0000 UTC m=+145.340841062 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.072211 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-vq8zt" event={"ID":"e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4","Type":"ContainerStarted","Data":"dd878da2d909a8c424d851879c7ce061a6bd1917c7bbd46f6162138778f4b808"} Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.074527 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.076324 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-vwp8j" event={"ID":"9dfb8a84-f022-4823-b563-5800b665b32f","Type":"ContainerStarted","Data":"c76dfaf2de5ed6f514a4a21d23c4041d3b8d0df90888affe1e8cdb13e2828cc5"} Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.106471 5037 patch_prober.go:28] interesting pod/console-operator-58897d9998-vq8zt container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/readyz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.106538 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-vq8zt" podUID="e109a3ac-c4bc-4f0b-a2d0-ed4eea1f63c4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/readyz\": dial tcp 10.217.0.23:8443: connect: connection refused" Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.116179 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" event={"ID":"290387b2-4285-4359-bfdc-f89128f0c0a2","Type":"ContainerStarted","Data":"a9deed0dd837df59f7ab69ece1f66419f5f59a7db6659e9220b571a9e144963b"} Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.116235 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" event={"ID":"290387b2-4285-4359-bfdc-f89128f0c0a2","Type":"ContainerStarted","Data":"574d901382b8a51824f3ad03a3128e00bef13fe83ff84d0f6b90222c37be891f"} Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.116855 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.147037 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:58 crc kubenswrapper[5037]: E1126 14:17:58.149117 5037 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.649090326 +0000 UTC m=+145.445860510 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.154647 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk" event={"ID":"1f3de401-4fc0-48c3-9ecc-0a994b8d5f72","Type":"ContainerStarted","Data":"78399a52bde2861a05ccdc3b62082e774caf66ec0977244cd5fbd1946287b90a"} Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.160236 5037 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-glk27 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.160311 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" podUID="290387b2-4285-4359-bfdc-f89128f0c0a2" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.201130 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-c252f" event={"ID":"13b910b7-69a1-438a-9ebe-d865adc99607","Type":"ContainerStarted","Data":"fabbb490c1b57f17bd5f317af2abcffde728cb5cdc650d7ddc220dbbfd908c2c"} Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.212533 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.232356 5037 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-c252f container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.7:6443/healthz\": dial tcp 10.217.0.7:6443: connect: connection refused" start-of-body= Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.232464 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-c252f" podUID="13b910b7-69a1-438a-9ebe-d865adc99607" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.7:6443/healthz\": dial tcp 10.217.0.7:6443: connect: connection refused" Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.236107 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx" event={"ID":"d8d223ba-d8fb-48bd-9654-4e8146097407","Type":"ContainerStarted","Data":"97d16bbeddebb8da5f4f60ca9e162bbf55045de7b4ffbd0e3aff6b36a43828cc"} Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 
14:17:58.254118 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:58 crc kubenswrapper[5037]: E1126 14:17:58.262132 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.762108878 +0000 UTC m=+145.558879252 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.263420 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-m2pbv" event={"ID":"13abb7ac-3229-4f80-9132-d51f89ec896b","Type":"ContainerStarted","Data":"3dad5c24ba7f831eb2be28055e0b8d8af53ee5fde5cbde799a30cac474fd570c"} Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.274485 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" podStartSLOduration=123.274454902 podStartE2EDuration="2m3.274454902s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:58.264136649 +0000 UTC m=+145.060906833" watchObservedRunningTime="2025-11-26 14:17:58.274454902 +0000 UTC m=+145.071225086" Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.295978 5037 generic.go:334] "Generic (PLEG): container finished" podID="1e46b121-f4d1-402d-8af2-425b4af276dd" containerID="2325b91a1f2e18b75484bb9825f1873adc8f56f4c5d6acaf12cc9fad1357c051" exitCode=0 Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.296394 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" event={"ID":"1e46b121-f4d1-402d-8af2-425b4af276dd","Type":"ContainerDied","Data":"2325b91a1f2e18b75484bb9825f1873adc8f56f4c5d6acaf12cc9fad1357c051"} Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.313004 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-h42qk" podStartSLOduration=123.31297758 podStartE2EDuration="2m3.31297758s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:58.310587839 +0000 UTC m=+145.107358033" watchObservedRunningTime="2025-11-26 14:17:58.31297758 +0000 UTC m=+145.109747764" Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.320933 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" 
event={"ID":"c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7","Type":"ContainerStarted","Data":"fd44b817f86970b6cd321514786506cdf499b972cbe3709b6647444110c31b12"} Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.357961 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:58 crc kubenswrapper[5037]: E1126 14:17:58.360184 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.860137268 +0000 UTC m=+145.656907452 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.360391 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:58 crc kubenswrapper[5037]: E1126 14:17:58.362186 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.86216867 +0000 UTC m=+145.658938854 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.406265 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g" event={"ID":"b0d344f6-3e27-4724-a70d-3b91cfb19576","Type":"ContainerStarted","Data":"1de6d331c96e18766a4d2ab63129148c2ea550c98b93a3a0d1f3f6fdbd8a4a55"} Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.408612 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-vq8zt" podStartSLOduration=123.40860153 podStartE2EDuration="2m3.40860153s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:58.408449165 +0000 UTC m=+145.205219369" watchObservedRunningTime="2025-11-26 14:17:58.40860153 +0000 UTC m=+145.205371714" Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.410963 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-c252f" podStartSLOduration=124.410950999 podStartE2EDuration="2m4.410950999s" podCreationTimestamp="2025-11-26 14:15:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:58.350447542 +0000 UTC m=+145.147217746" watchObservedRunningTime="2025-11-26 14:17:58.410950999 +0000 UTC m=+145.207721183" Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.464954 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.539205 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g" podStartSLOduration=124.539171727 podStartE2EDuration="2m4.539171727s" podCreationTimestamp="2025-11-26 14:15:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:58.479021689 +0000 UTC m=+145.275791873" watchObservedRunningTime="2025-11-26 14:17:58.539171727 +0000 UTC m=+145.335941901" Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.539852 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" podStartSLOduration=124.539844184 podStartE2EDuration="2m4.539844184s" podCreationTimestamp="2025-11-26 14:15:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:58.444263706 +0000 UTC m=+145.241033890" watchObservedRunningTime="2025-11-26 
14:17:58.539844184 +0000 UTC m=+145.336614368" Nov 26 14:17:58 crc kubenswrapper[5037]: E1126 14:17:58.542855 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:58.99558911 +0000 UTC m=+145.792359294 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.577279 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:58 crc kubenswrapper[5037]: E1126 14:17:58.577860 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:59.077841719 +0000 UTC m=+145.874611903 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.615211 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-l7mvc" podStartSLOduration=123.615184508 podStartE2EDuration="2m3.615184508s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:58.575963132 +0000 UTC m=+145.372733326" watchObservedRunningTime="2025-11-26 14:17:58.615184508 +0000 UTC m=+145.411954692" Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.616020 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-4pktg"] Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.640932 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-m2pbv" podStartSLOduration=5.640908501 podStartE2EDuration="5.640908501s" podCreationTimestamp="2025-11-26 14:17:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:58.640161873 +0000 UTC m=+145.436932057" watchObservedRunningTime="2025-11-26 14:17:58.640908501 +0000 UTC m=+145.437678685" Nov 26 14:17:58 crc 
kubenswrapper[5037]: I1126 14:17:58.674370 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-85ns4"] Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.679989 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:58 crc kubenswrapper[5037]: E1126 14:17:58.680695 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:59.180669392 +0000 UTC m=+145.977439576 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.709410 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f"] Nov 26 14:17:58 crc kubenswrapper[5037]: W1126 14:17:58.727688 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf3320db_8161_492c_89ef_79aff52e898c.slice/crio-fda56eac3db91df60b49a05442edbdcdeec16e5425d7c60a5b5676425b264c10 WatchSource:0}: Error finding container fda56eac3db91df60b49a05442edbdcdeec16e5425d7c60a5b5676425b264c10: Status 404 returned error can't find the container with id fda56eac3db91df60b49a05442edbdcdeec16e5425d7c60a5b5676425b264c10 Nov 26 14:17:58 crc kubenswrapper[5037]: W1126 14:17:58.731985 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda42804b_0fa3_43ee_9566_296c28b8052f.slice/crio-716aa628b01c5b4c676b731f3649ee5809b6b977ec3249cda37fa4dfea60ab3b WatchSource:0}: Error finding container 716aa628b01c5b4c676b731f3649ee5809b6b977ec3249cda37fa4dfea60ab3b: Status 404 returned error can't find the container with id 716aa628b01c5b4c676b731f3649ee5809b6b977ec3249cda37fa4dfea60ab3b Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.735080 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-9hccb"] Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.757879 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7"] Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.772797 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-82wc7"] Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.782413 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-7w8nb"] Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.783331 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:58 crc kubenswrapper[5037]: E1126 14:17:58.783720 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:59.283691639 +0000 UTC m=+146.080461823 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.814388 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-qfdqh"] Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.821814 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p"] Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.885645 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:58 crc kubenswrapper[5037]: E1126 14:17:58.886401 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:59.386362678 +0000 UTC m=+146.183132862 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.892852 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9z9lt"] Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.903185 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-cwfd8"] Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.923747 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh"] Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.932715 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg"] Nov 26 14:17:58 crc kubenswrapper[5037]: W1126 14:17:58.946487 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfabb60f0_0ad0_4cb0_9a64_81ecde15afff.slice/crio-d650bd1777e65320bc10b439adf46dd8f0aa44a9f1852819f873bc55943de720 WatchSource:0}: Error finding container d650bd1777e65320bc10b439adf46dd8f0aa44a9f1852819f873bc55943de720: Status 404 returned error can't find the container with id d650bd1777e65320bc10b439adf46dd8f0aa44a9f1852819f873bc55943de720 Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.960512 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4"] Nov 26 14:17:58 crc kubenswrapper[5037]: I1126 14:17:58.987101 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:58 crc kubenswrapper[5037]: E1126 14:17:58.988219 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:59.488205625 +0000 UTC m=+146.284975809 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:59 crc kubenswrapper[5037]: W1126 14:17:59.011438 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb18bda87_1ef5_4511_ae6b_d9326a76aca2.slice/crio-62746b54c8de8d8603980d505283f8d0e940dd8d5ee39003a66177a91bbd8997 WatchSource:0}: Error finding container 62746b54c8de8d8603980d505283f8d0e940dd8d5ee39003a66177a91bbd8997: Status 404 returned error can't find the container with id 62746b54c8de8d8603980d505283f8d0e940dd8d5ee39003a66177a91bbd8997 Nov 26 14:17:59 crc kubenswrapper[5037]: W1126 14:17:59.012428 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podadb95af6_2754_4b77_94a8_c8df9d429a2c.slice/crio-0b285ce4026fe405fcc288b533b3ada379170991b9685e6642f55205bd87cb04 WatchSource:0}: Error finding container 0b285ce4026fe405fcc288b533b3ada379170991b9685e6642f55205bd87cb04: Status 404 returned error can't find the container with id 0b285ce4026fe405fcc288b533b3ada379170991b9685e6642f55205bd87cb04 Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.020342 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-zz4kv"] Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.090731 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:59 crc kubenswrapper[5037]: E1126 14:17:59.091218 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:59.591200532 +0000 UTC m=+146.387970716 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.195307 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:59 crc kubenswrapper[5037]: E1126 14:17:59.195818 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:59.69580182 +0000 UTC m=+146.492572004 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.259985 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k"] Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.264112 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467"] Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.284269 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc"] Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.295756 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25"] Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.296505 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:59 crc kubenswrapper[5037]: E1126 14:17:59.296870 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:17:59.796851527 +0000 UTC m=+146.593621711 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.361578 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn"] Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.361643 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr"] Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.368819 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5"] Nov 26 14:17:59 crc kubenswrapper[5037]: W1126 14:17:59.383038 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc8ba13cb_4099_4fa8_b0fd_dba5852bd704.slice/crio-688a0fc5ff4969b8dd9126d24b0154cb63b0ad12b9ef0842067e939e3f7dd52a WatchSource:0}: Error finding container 688a0fc5ff4969b8dd9126d24b0154cb63b0ad12b9ef0842067e939e3f7dd52a: Status 404 returned error can't find the container with id 688a0fc5ff4969b8dd9126d24b0154cb63b0ad12b9ef0842067e939e3f7dd52a Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.427536 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:59 crc kubenswrapper[5037]: E1126 14:17:59.428250 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:17:59.928223535 +0000 UTC m=+146.724993719 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.453503 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg"] Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.453562 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-7wr5z"] Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.461669 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-txjgw"] Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.477364 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6"] Nov 26 14:17:59 crc kubenswrapper[5037]: W1126 14:17:59.522593 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0ea6163_7bb2_458d_bd2b_5ec1b5d4960c.slice/crio-f57597ccd3bbcb601126ce4e0ffd360d3e7ef94cf31fb503ba6d9bdd68189001 WatchSource:0}: Error finding container f57597ccd3bbcb601126ce4e0ffd360d3e7ef94cf31fb503ba6d9bdd68189001: Status 404 returned error can't find the container with id f57597ccd3bbcb601126ce4e0ffd360d3e7ef94cf31fb503ba6d9bdd68189001 Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.542902 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.543041 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-85ns4" event={"ID":"77dc662c-6698-4035-b332-13bc5a0f3136","Type":"ContainerStarted","Data":"7da98e620af8b3df2374555650ba26351aee28349681cc790f2fe22e72047245"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.543085 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-85ns4" event={"ID":"77dc662c-6698-4035-b332-13bc5a0f3136","Type":"ContainerStarted","Data":"fed960687e439c4b47db774d3c251b69dc23d1378d56be2e35200aeb1a83062e"} Nov 26 14:17:59 crc kubenswrapper[5037]: E1126 14:17:59.543597 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:00.043572885 +0000 UTC m=+146.840343069 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.575226 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-85ns4" podStartSLOduration=6.575201529 podStartE2EDuration="6.575201529s" podCreationTimestamp="2025-11-26 14:17:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:59.574972134 +0000 UTC m=+146.371742318" watchObservedRunningTime="2025-11-26 14:17:59.575201529 +0000 UTC m=+146.371971713" Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.584722 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.584783 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.603214 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.605427 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-c252f" event={"ID":"13b910b7-69a1-438a-9ebe-d865adc99607","Type":"ContainerStarted","Data":"2dbcbfd2f92c71a86a7587ebeb94d010882f9b81a0190c6c8ee23f35c57af1dd"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.646081 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:59 crc kubenswrapper[5037]: E1126 14:17:59.646561 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:00.146540162 +0000 UTC m=+146.943310346 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.648949 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5fx6g" event={"ID":"b0d344f6-3e27-4724-a70d-3b91cfb19576","Type":"ContainerStarted","Data":"233ae5c66bb3d44d416803484de2ccfc0de25c049803aefc4381641c8a7f1c68"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.659502 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" event={"ID":"eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff","Type":"ContainerStarted","Data":"9fb03efed3f31a0ceda20a4a3b2252567121a6a3f41602f708b6c3bd3d973aa8"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.659561 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" event={"ID":"eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff","Type":"ContainerStarted","Data":"55958f16d7f182e4014277f921ef339adbea705af68ad72362e2317ab92e86c7"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.660378 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.661948 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" event={"ID":"19bb1706-6f02-4fe1-ac74-3425bc25376c","Type":"ContainerStarted","Data":"14f1d2f38258dc03141e5826474a33c4165b2a0c447f402af0ff5ee5453cd709"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.663641 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.664369 5037 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-p7j5p container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.42:8443/healthz\": dial tcp 10.217.0.42:8443: connect: connection refused" start-of-body= Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.664449 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" podUID="eaa93d21-d7c9-47f7-bfad-16f6e8a0bfff" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.42:8443/healthz\": dial tcp 10.217.0.42:8443: connect: connection refused" Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.687497 5037 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-jnbdg container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" start-of-body= Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.687575 5037 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" podUID="19bb1706-6f02-4fe1-ac74-3425bc25376c" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.698938 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" podStartSLOduration=124.698899362 podStartE2EDuration="2m4.698899362s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:59.688075296 +0000 UTC m=+146.484845480" watchObservedRunningTime="2025-11-26 14:17:59.698899362 +0000 UTC m=+146.495669556" Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.713525 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx" event={"ID":"d8d223ba-d8fb-48bd-9654-4e8146097407","Type":"ContainerStarted","Data":"e69995798dbb2c67c0e9d74b4e23ecb884f42376f3bddf0307a83952556e8a67"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.745572 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" podStartSLOduration=124.745543606 podStartE2EDuration="2m4.745543606s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:59.712749984 +0000 UTC m=+146.509520188" watchObservedRunningTime="2025-11-26 14:17:59.745543606 +0000 UTC m=+146.542313790" Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.747006 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:59 crc kubenswrapper[5037]: E1126 14:17:59.751524 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:00.251477478 +0000 UTC m=+147.048247662 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.753613 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" event={"ID":"c8ba13cb-4099-4fa8-b0fd-dba5852bd704","Type":"ContainerStarted","Data":"688a0fc5ff4969b8dd9126d24b0154cb63b0ad12b9ef0842067e939e3f7dd52a"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.758690 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l82xx" podStartSLOduration=124.75866119 podStartE2EDuration="2m4.75866119s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:59.746268725 +0000 UTC m=+146.543038919" watchObservedRunningTime="2025-11-26 14:17:59.75866119 +0000 UTC m=+146.555431374" Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.777973 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k" event={"ID":"8b2886cf-264e-4ecd-b5f3-05c8974c7990","Type":"ContainerStarted","Data":"d5c8a39d797d53bbdbb36d108d8e2d46d60dd264ef4ccaf827c3e5321e202c93"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.802326 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8" event={"ID":"edcc6478-c344-433f-9fef-c27760c464fc","Type":"ContainerStarted","Data":"179fc6fb6b70cb7c6dfe030f23af9c58c47340857f72f90a8136cafe160dd26d"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.846701 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-4pktg" event={"ID":"9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7","Type":"ContainerStarted","Data":"e05ed20c51b0e1ee9c42404a055d7a13c317e6caaaba46088e877c1cdbb6c3ea"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.848862 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:17:59 crc kubenswrapper[5037]: E1126 14:17:59.849798 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:00.349776235 +0000 UTC m=+147.146546419 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.858641 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" event={"ID":"b18bda87-1ef5-4511-ae6b-d9326a76aca2","Type":"ContainerStarted","Data":"62746b54c8de8d8603980d505283f8d0e940dd8d5ee39003a66177a91bbd8997"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.932613 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-gncr9" event={"ID":"73f71d36-826a-4890-8f3f-6f1f3f159d5e","Type":"ContainerStarted","Data":"9319bb6756ae2fb66b73c8b01edc20a4c194f8a49b1c341dfeec96a50afde48a"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.937837 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" event={"ID":"fabb60f0-0ad0-4cb0-9a64-81ecde15afff","Type":"ContainerStarted","Data":"d650bd1777e65320bc10b439adf46dd8f0aa44a9f1852819f873bc55943de720"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.944577 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" event={"ID":"af3320db-8161-492c-89ef-79aff52e898c","Type":"ContainerStarted","Data":"65c5a49645352c5fc902e1204ecf36d8657bfb621682d353e8101225dedf62c6"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.944633 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" event={"ID":"af3320db-8161-492c-89ef-79aff52e898c","Type":"ContainerStarted","Data":"fda56eac3db91df60b49a05442edbdcdeec16e5425d7c60a5b5676425b264c10"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.950072 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:17:59 crc kubenswrapper[5037]: E1126 14:17:59.952427 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:00.452397012 +0000 UTC m=+147.249167206 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.956183 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8" podStartSLOduration=124.956153078 podStartE2EDuration="2m4.956153078s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:59.832903687 +0000 UTC m=+146.629673871" watchObservedRunningTime="2025-11-26 14:17:59.956153078 +0000 UTC m=+146.752923262" Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.992813 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-bl76p" event={"ID":"0f85943c-8848-42b9-a4e8-43f2689ba52f","Type":"ContainerStarted","Data":"01569eb122f07c77556c52c95c0d958020ed05502aa85d5f70fa44a877c5d093"} Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.994148 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-gncr9" podStartSLOduration=124.994121092 podStartE2EDuration="2m4.994121092s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:59.957004149 +0000 UTC m=+146.753774333" watchObservedRunningTime="2025-11-26 14:17:59.994121092 +0000 UTC m=+146.790891276" Nov 26 14:17:59 crc kubenswrapper[5037]: I1126 14:17:59.994632 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-82wc7" podStartSLOduration=125.994628045 podStartE2EDuration="2m5.994628045s" podCreationTimestamp="2025-11-26 14:15:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:17:59.994048721 +0000 UTC m=+146.790818915" watchObservedRunningTime="2025-11-26 14:17:59.994628045 +0000 UTC m=+146.791398229" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.003934 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7" event={"ID":"a16c7c46-2c28-444d-8b7d-0ef797877620","Type":"ContainerStarted","Data":"f78c5a01f5b79b68ebaf6148581b27208e45b45501da0e2a0275a6ea30d41f27"} Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.003991 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7" event={"ID":"a16c7c46-2c28-444d-8b7d-0ef797877620","Type":"ContainerStarted","Data":"d7883b106f979dc83c7c35561b61d84165445d052ccef59eb742ff72462024ef"} Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.010269 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" 
event={"ID":"f9e69f1b-47a1-4ea5-9d69-b79bf401810a","Type":"ContainerStarted","Data":"e92c2ed90b03fd6b8c18be24b4799ace1d7b663a3991e76875e5c3b1014455ff"} Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.022337 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-g2jw7" podStartSLOduration=125.022260807 podStartE2EDuration="2m5.022260807s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:00.021656782 +0000 UTC m=+146.818426976" watchObservedRunningTime="2025-11-26 14:18:00.022260807 +0000 UTC m=+146.819030991" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.028503 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" event={"ID":"1e46b121-f4d1-402d-8af2-425b4af276dd","Type":"ContainerStarted","Data":"cfc801afc139c1169ce19a07e67f77701a0e3bde06f9e7fa40f1fc773fd4c803"} Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.030141 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.052524 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:00 crc kubenswrapper[5037]: E1126 14:18:00.053675 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:00.553655035 +0000 UTC m=+147.350425219 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.055748 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qfdqh" event={"ID":"25030986-5796-4784-accd-c465c7c2daa3","Type":"ContainerStarted","Data":"a391bab194b21e82f9a65c9d50c1f816a2e9570924ca487704a028e55db59c39"} Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.055801 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qfdqh" event={"ID":"25030986-5796-4784-accd-c465c7c2daa3","Type":"ContainerStarted","Data":"b249f62189c50e0694913d4601fc3ae5be8a38e8d2e6db18ff14bb125d71cd54"} Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.067941 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-7w8nb" event={"ID":"a2386288-e064-42f1-aac0-5866f0179542","Type":"ContainerStarted","Data":"9a12f8752e822434b4720ed11c6024320a30ea1492ac53b059bb5bc467e10edb"} Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.081064 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh" event={"ID":"adb95af6-2754-4b77-94a8-c8df9d429a2c","Type":"ContainerStarted","Data":"0b285ce4026fe405fcc288b533b3ada379170991b9685e6642f55205bd87cb04"} Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.136945 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9hccb" event={"ID":"9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d","Type":"ContainerStarted","Data":"d76cf87a1efbdca7e7d5e4f1cf6dec25766b4baeaa349ba21962599b6f5218b0"} Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.140203 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh" podStartSLOduration=125.140184403 podStartE2EDuration="2m5.140184403s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:00.139425775 +0000 UTC m=+146.936195969" watchObservedRunningTime="2025-11-26 14:18:00.140184403 +0000 UTC m=+146.936954587" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.140576 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" podStartSLOduration=125.140567593 podStartE2EDuration="2m5.140567593s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:00.088590872 +0000 UTC m=+146.885361066" watchObservedRunningTime="2025-11-26 14:18:00.140567593 +0000 UTC m=+146.937337777" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.157804 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:00 crc kubenswrapper[5037]: E1126 14:18:00.159063 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:00.659011412 +0000 UTC m=+147.455781616 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.221777 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" event={"ID":"d732fc51-cc4a-49a4-b296-2c40ddc33395","Type":"ContainerStarted","Data":"9d3d0bd15e8b4c2b022e57e27452bbae32855f463314f36d9d5c8a80a8f0b813"} Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.246767 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-vwp8j" event={"ID":"9dfb8a84-f022-4823-b563-5800b665b32f","Type":"ContainerStarted","Data":"d616f24ae3bb8c716273cda9850f8e43fcfe3296511f1f29356994f26ff3990c"} Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.248095 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-vwp8j" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.250353 5037 patch_prober.go:28] interesting pod/downloads-7954f5f757-vwp8j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.250398 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vwp8j" podUID="9dfb8a84-f022-4823-b563-5800b665b32f" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.271866 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:00 crc kubenswrapper[5037]: E1126 14:18:00.272321 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:00.772303401 +0000 UTC m=+147.569073585 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.286170 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" event={"ID":"da42804b-0fa3-43ee-9566-296c28b8052f","Type":"ContainerStarted","Data":"e0b94887a7ceb773846edbc19d5674bf4b5cd5e32774aaa15c6c7aa979d9bd40"} Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.286380 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" event={"ID":"da42804b-0fa3-43ee-9566-296c28b8052f","Type":"ContainerStarted","Data":"716aa628b01c5b4c676b731f3649ee5809b6b977ec3249cda37fa4dfea60ab3b"} Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.288497 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-qfdqh" podStartSLOduration=125.288471311 podStartE2EDuration="2m5.288471311s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:00.216466232 +0000 UTC m=+147.013236426" watchObservedRunningTime="2025-11-26 14:18:00.288471311 +0000 UTC m=+147.085241495" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.301691 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" podStartSLOduration=126.301643686 podStartE2EDuration="2m6.301643686s" podCreationTimestamp="2025-11-26 14:15:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:00.286184602 +0000 UTC m=+147.082954796" watchObservedRunningTime="2025-11-26 14:18:00.301643686 +0000 UTC m=+147.098413870" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.306272 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.314254 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-vq8zt" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.346470 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" podStartSLOduration=125.346446684 podStartE2EDuration="2m5.346446684s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:00.344520165 +0000 UTC m=+147.141290349" watchObservedRunningTime="2025-11-26 14:18:00.346446684 +0000 UTC m=+147.143216868" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.347421 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lnds7" Nov 26 14:18:00 crc 
kubenswrapper[5037]: I1126 14:18:00.390567 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-vwp8j" podStartSLOduration=125.390543134 podStartE2EDuration="2m5.390543134s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:00.389744453 +0000 UTC m=+147.186514637" watchObservedRunningTime="2025-11-26 14:18:00.390543134 +0000 UTC m=+147.187313318" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.392263 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:00 crc kubenswrapper[5037]: E1126 14:18:00.392840 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:00.892815902 +0000 UTC m=+147.689586096 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.406969 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:00 crc kubenswrapper[5037]: E1126 14:18:00.415159 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:00.915132649 +0000 UTC m=+147.711902853 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.422635 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.440504 5037 patch_prober.go:28] interesting pod/router-default-5444994796-gncr9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 14:18:00 crc kubenswrapper[5037]: [-]has-synced failed: reason withheld Nov 26 14:18:00 crc kubenswrapper[5037]: [+]process-running ok Nov 26 14:18:00 crc kubenswrapper[5037]: healthz check failed Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.440581 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gncr9" podUID="73f71d36-826a-4890-8f3f-6f1f3f159d5e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.510350 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:00 crc kubenswrapper[5037]: E1126 14:18:00.512216 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.012188665 +0000 UTC m=+147.808958849 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.609511 5037 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-c252f container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.7:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.621441 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-c252f" podUID="13b910b7-69a1-438a-9ebe-d865adc99607" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.7:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.612586 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:00 crc kubenswrapper[5037]: E1126 14:18:00.612962 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.112946005 +0000 UTC m=+147.909716189 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.725547 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:00 crc kubenswrapper[5037]: E1126 14:18:00.726038 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.225999967 +0000 UTC m=+148.022770151 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.726576 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:00 crc kubenswrapper[5037]: E1126 14:18:00.727056 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.227038863 +0000 UTC m=+148.023809047 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.831010 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:00 crc kubenswrapper[5037]: E1126 14:18:00.831246 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.33120775 +0000 UTC m=+148.127977934 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.831514 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:00 crc kubenswrapper[5037]: E1126 14:18:00.832077 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.332058381 +0000 UTC m=+148.128828565 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.932525 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:00 crc kubenswrapper[5037]: E1126 14:18:00.932801 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.43276418 +0000 UTC m=+148.229534364 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:00 crc kubenswrapper[5037]: I1126 14:18:00.933320 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:00 crc kubenswrapper[5037]: E1126 14:18:00.933731 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.433714444 +0000 UTC m=+148.230484628 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.034106 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:01 crc kubenswrapper[5037]: E1126 14:18:01.034674 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.534647259 +0000 UTC m=+148.331417443 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.136458 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:01 crc kubenswrapper[5037]: E1126 14:18:01.137026 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.637001659 +0000 UTC m=+148.433771843 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.239667 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:01 crc kubenswrapper[5037]: E1126 14:18:01.239982 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.739966196 +0000 UTC m=+148.536736380 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.296229 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" event={"ID":"fabb60f0-0ad0-4cb0-9a64-81ecde15afff","Type":"ContainerStarted","Data":"575e632752266aa97adde12713e02b4aef0e515054548d9b108b6467f877f24d"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.298975 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k" event={"ID":"8b2886cf-264e-4ecd-b5f3-05c8974c7990","Type":"ContainerStarted","Data":"131b9b89c2f005d5b469efef6be9f22e637a8ec916ec811e09c93efa221863c0"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.299003 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k" event={"ID":"8b2886cf-264e-4ecd-b5f3-05c8974c7990","Type":"ContainerStarted","Data":"ab8d70ccc57a4693f2adc06904ce11827ad9df2091ea9a71cacf39d82e988237"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.311611 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-cwfd8" event={"ID":"edcc6478-c344-433f-9fef-c27760c464fc","Type":"ContainerStarted","Data":"9b8910ebc6236120f8ffbdeb3065175f520584cb8db0cf52552f6775a5079bd2"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.329174 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" event={"ID":"b18bda87-1ef5-4511-ae6b-d9326a76aca2","Type":"ContainerStarted","Data":"cdee88d7e1592a5894b8c754d3dd455f83ca36bdd6e1ebfb4ab0b65c578191af"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.329223 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" event={"ID":"b18bda87-1ef5-4511-ae6b-d9326a76aca2","Type":"ContainerStarted","Data":"92581109cb5e138680cf882d278ac1add3fb27aaba1d168ec4c4827eddb243e3"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.337548 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg" event={"ID":"116c03c2-be5b-427a-8143-f40794e102a5","Type":"ContainerStarted","Data":"5f566f7a178cb59cd382f49c8f1379d732268a110290f0d1d7b539ee51723c2b"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.337603 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg" event={"ID":"116c03c2-be5b-427a-8143-f40794e102a5","Type":"ContainerStarted","Data":"72cb70e0c1e85d061c517f5c61c303cf4ff203dc2aef783157ac9fa8fcd76a4b"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.342338 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:01 crc kubenswrapper[5037]: E1126 14:18:01.344298 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.844264975 +0000 UTC m=+148.641035159 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.350004 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8h7nh" event={"ID":"adb95af6-2754-4b77-94a8-c8df9d429a2c","Type":"ContainerStarted","Data":"c733692a3b778473dbd9b193c91e31b6b4e2cda95685167ab0768c6d5ad13164"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.357306 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-9z9lt" podStartSLOduration=126.357263825 podStartE2EDuration="2m6.357263825s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.329383057 +0000 UTC m=+148.126153241" watchObservedRunningTime="2025-11-26 14:18:01.357263825 +0000 UTC m=+148.154034009" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.378352 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nnqb4" podStartSLOduration=126.378320151 podStartE2EDuration="2m6.378320151s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.356490086 +0000 UTC m=+148.153260270" watchObservedRunningTime="2025-11-26 14:18:01.378320151 +0000 UTC m=+148.175090335" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.401071 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9hccb" event={"ID":"9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d","Type":"ContainerStarted","Data":"ed9c64fa97f6c8d933f1c9f94f3aafb3592189e2ba3c857cd7ba4d40ab1eb786"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.401128 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9hccb" event={"ID":"9fa98008-2fa3-4d38-9ebe-53e07a4d4c1d","Type":"ContainerStarted","Data":"18072b84a360eaa3c00b9516225b2aa870e2a398d37cbd9ebefd642ddd39ced9"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.401992 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-9hccb" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.434917 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5kk6k" podStartSLOduration=126.434890548 
podStartE2EDuration="2m6.434890548s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.402221308 +0000 UTC m=+148.198991492" watchObservedRunningTime="2025-11-26 14:18:01.434890548 +0000 UTC m=+148.231660732" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.440606 5037 patch_prober.go:28] interesting pod/router-default-5444994796-gncr9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 14:18:01 crc kubenswrapper[5037]: [-]has-synced failed: reason withheld Nov 26 14:18:01 crc kubenswrapper[5037]: [+]process-running ok Nov 26 14:18:01 crc kubenswrapper[5037]: healthz check failed Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.440678 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gncr9" podUID="73f71d36-826a-4890-8f3f-6f1f3f159d5e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.444206 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.448808 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-4pktg" event={"ID":"9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7","Type":"ContainerStarted","Data":"e8235da75b75be0f148297316905c810dadda0e0b252e32bf7b7b46932d51428"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.448854 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-4pktg" event={"ID":"9bd7b7cc-e80c-40aa-a6b8-8b9272ccdfb7","Type":"ContainerStarted","Data":"a90693dbb45eeb3b9c1383feeb54b1a5aafa596eafbef354a8ccbe19d333a17b"} Nov 26 14:18:01 crc kubenswrapper[5037]: E1126 14:18:01.451449 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:01.951408918 +0000 UTC m=+148.748179102 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.475873 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-9hccb" podStartSLOduration=8.475848248 podStartE2EDuration="8.475848248s" podCreationTimestamp="2025-11-26 14:17:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.437416262 +0000 UTC m=+148.234186456" watchObservedRunningTime="2025-11-26 14:18:01.475848248 +0000 UTC m=+148.272618432" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.476328 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ktvdg" podStartSLOduration=126.476323071 podStartE2EDuration="2m6.476323071s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.47159071 +0000 UTC m=+148.268360894" watchObservedRunningTime="2025-11-26 14:18:01.476323071 +0000 UTC m=+148.273093255" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.477495 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6" event={"ID":"d8042008-6a66-42ca-8f5e-76dd748cf0ba","Type":"ContainerStarted","Data":"f1e3d06dce6689e8c01815d7c11f7bf27e0dc84541951940099dda4bc5a54180"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.477575 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6" event={"ID":"d8042008-6a66-42ca-8f5e-76dd748cf0ba","Type":"ContainerStarted","Data":"0aba7dc06b9cc3d8d86260c0197d96085a12bb31576a8785d8e0e00c4d3210a7"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.505190 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" event={"ID":"29912f3d-383e-4361-b882-48ab47cecb56","Type":"ContainerStarted","Data":"aed9e970062cee41b1c0731a32fa6116c6ba18045a370256f8c5a99df9398507"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.505273 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" event={"ID":"29912f3d-383e-4361-b882-48ab47cecb56","Type":"ContainerStarted","Data":"5ea51aef2f3a8acf9dd7225753d04140d6fa34d25c3a382851c1461edb4a7f10"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.505610 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-4pktg" podStartSLOduration=126.505557773 podStartE2EDuration="2m6.505557773s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.505343767 +0000 UTC m=+148.302113951" 
watchObservedRunningTime="2025-11-26 14:18:01.505557773 +0000 UTC m=+148.302327957" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.541050 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wsp6" podStartSLOduration=126.541032025 podStartE2EDuration="2m6.541032025s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.539737882 +0000 UTC m=+148.336508066" watchObservedRunningTime="2025-11-26 14:18:01.541032025 +0000 UTC m=+148.337802209" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.541745 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn" event={"ID":"e5c0da48-0f42-4508-be7c-da6125c90874","Type":"ContainerStarted","Data":"8ddd69718e8ed22bed6180752e9813c2d5702524a0d360ea5ddc4c1aabd2a87f"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.541805 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn" event={"ID":"e5c0da48-0f42-4508-be7c-da6125c90874","Type":"ContainerStarted","Data":"ff2430611cb5cab22376092396636f7a992e21a9e0c73647cd50f351fe0457ff"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.547264 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:01 crc kubenswrapper[5037]: E1126 14:18:01.549623 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:02.049604632 +0000 UTC m=+148.846374816 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.573259 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" podStartSLOduration=126.573231683 podStartE2EDuration="2m6.573231683s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.572923505 +0000 UTC m=+148.369693689" watchObservedRunningTime="2025-11-26 14:18:01.573231683 +0000 UTC m=+148.370001867" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.574783 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-7w8nb" event={"ID":"a2386288-e064-42f1-aac0-5866f0179542","Type":"ContainerStarted","Data":"9d906085d0d1d0bdf153962779255b910dac6c198b790dff1ebd26ccca551936"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.586183 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" event={"ID":"c8ba13cb-4099-4fa8-b0fd-dba5852bd704","Type":"ContainerStarted","Data":"72577c704ab4f4fa4d85b486617888c5fa62c2a1e2f18e2a6c2402ecacb43ae5"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.587041 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.588773 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-7wr5z" event={"ID":"b0ea6163-7bb2-458d-bd2b-5ec1b5d4960c","Type":"ContainerStarted","Data":"1c7ab82f4bd9e356e319b1bfaa23a7484ddb5cd14326b710e5c63cf6c31ab06a"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.588819 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-7wr5z" event={"ID":"b0ea6163-7bb2-458d-bd2b-5ec1b5d4960c","Type":"ContainerStarted","Data":"a7931da6e71aae5f543eca8355098fc22f72bbd078f0434d9fc72127035317d0"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.588840 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-7wr5z" event={"ID":"b0ea6163-7bb2-458d-bd2b-5ec1b5d4960c","Type":"ContainerStarted","Data":"f57597ccd3bbcb601126ce4e0ffd360d3e7ef94cf31fb503ba6d9bdd68189001"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.598873 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr" event={"ID":"81241e6e-dcc2-4509-8e40-fd330f57a15b","Type":"ContainerStarted","Data":"8ed6f7f1281c9154312d0ee8b8da8b381656b4543ee7082ec6aab0e4160024c9"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.598930 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr" 
event={"ID":"81241e6e-dcc2-4509-8e40-fd330f57a15b","Type":"ContainerStarted","Data":"1ba342f9cc28f20f0bc35b71613146feada8e3d9b44e97e4841d98e3adbcff20"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.601186 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.622080 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" event={"ID":"19bb1706-6f02-4fe1-ac74-3425bc25376c","Type":"ContainerStarted","Data":"5a569601fbbffcd5fb52d3070cd02e36e11bf5613925a819930a2059767d10b8"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.638155 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.638645 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lt8gn" podStartSLOduration=126.638623033 podStartE2EDuration="2m6.638623033s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.623807647 +0000 UTC m=+148.420577851" watchObservedRunningTime="2025-11-26 14:18:01.638623033 +0000 UTC m=+148.435393217" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.649116 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:01 crc kubenswrapper[5037]: E1126 14:18:01.649576 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:02.149551031 +0000 UTC m=+148.946321215 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.656316 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc" event={"ID":"ec1b52c7-15a6-4489-ba94-18d3621f4931","Type":"ContainerStarted","Data":"86b4d0d97bdce1981262132887e5a11914244a4d100c4475a2074bc40fc66b75"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.656359 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc" event={"ID":"ec1b52c7-15a6-4489-ba94-18d3621f4931","Type":"ContainerStarted","Data":"75f8f6b1331d5906bb6a48a80e2c55f4847793a9d0803c5ffaa4c8c37c4d5646"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.670881 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25" event={"ID":"581b0050-27fb-4d72-9f11-75f4eb55a783","Type":"ContainerStarted","Data":"84c9c438b37d45d33771155244f5a601e211462b999f046b9dd5e391ce92a1b6"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.670958 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25" event={"ID":"581b0050-27fb-4d72-9f11-75f4eb55a783","Type":"ContainerStarted","Data":"f6a78d334b87fd6570d6e7914d4f4df64b993e0c064a70ad4c28da0d4455b3ad"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.692070 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-7wr5z" podStartSLOduration=126.692048342 podStartE2EDuration="2m6.692048342s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.688717906 +0000 UTC m=+148.485488090" watchObservedRunningTime="2025-11-26 14:18:01.692048342 +0000 UTC m=+148.488818526" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.699401 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" event={"ID":"8b2f4cca-09b7-44dc-9458-298b0e3c8507","Type":"ContainerStarted","Data":"2fff1264478bbaef430520300794f4610677c34f3c9285de0cd18e71b13117b2"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.699449 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.699460 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" event={"ID":"8b2f4cca-09b7-44dc-9458-298b0e3c8507","Type":"ContainerStarted","Data":"6d1268fce4fa2cfdc7a92aa145a796669d80e80eefa8d6048c7b9e1ae4c2ebf5"} Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.707968 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-p7j5p" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.712295 5037 patch_prober.go:28] interesting pod/downloads-7954f5f757-vwp8j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.712643 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vwp8j" podUID="9dfb8a84-f022-4823-b563-5800b665b32f" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.727454 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.736863 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-fpm9x" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.743490 5037 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-txjgw container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.743580 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" podUID="8b2f4cca-09b7-44dc-9458-298b0e3c8507" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.745308 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jb467" podStartSLOduration=126.745263594 podStartE2EDuration="2m6.745263594s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.736747147 +0000 UTC m=+148.533517331" watchObservedRunningTime="2025-11-26 14:18:01.745263594 +0000 UTC m=+148.542033788" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.762420 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:01 crc kubenswrapper[5037]: E1126 14:18:01.772593 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:02.272570147 +0000 UTC m=+149.069340331 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.870330 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.870551 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.870655 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.870696 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.870718 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:18:01 crc kubenswrapper[5037]: E1126 14:18:01.871188 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:02.371171473 +0000 UTC m=+149.167941657 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.879182 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.898505 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.902486 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.923579 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-7w8nb" podStartSLOduration=126.923552653 podStartE2EDuration="2m6.923552653s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.861734163 +0000 UTC m=+148.658504347" watchObservedRunningTime="2025-11-26 14:18:01.923552653 +0000 UTC m=+148.720322837" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.923997 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.925524 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.940107 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.944328 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.977564 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:01 crc kubenswrapper[5037]: E1126 14:18:01.978039 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:02.478022257 +0000 UTC m=+149.274792441 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.996804 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr" podStartSLOduration=126.996780113 podStartE2EDuration="2m6.996780113s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.938811341 +0000 UTC m=+148.735581525" watchObservedRunningTime="2025-11-26 14:18:01.996780113 +0000 UTC m=+148.793550297" Nov 26 14:18:01 crc kubenswrapper[5037]: I1126 14:18:01.998806 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" podStartSLOduration=126.998799385 podStartE2EDuration="2m6.998799385s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:01.996545557 +0000 UTC m=+148.793315761" watchObservedRunningTime="2025-11-26 14:18:01.998799385 +0000 UTC m=+148.795569569" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.010365 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-jnbdg" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.066948 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-6kv25" podStartSLOduration=127.066929555 podStartE2EDuration="2m7.066929555s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:02.063746245 +0000 UTC m=+148.860516439" watchObservedRunningTime="2025-11-26 14:18:02.066929555 +0000 UTC m=+148.863699729" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.078971 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:02 crc kubenswrapper[5037]: E1126 14:18:02.079539 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:02.579503595 +0000 UTC m=+149.376273779 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.079705 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:02 crc kubenswrapper[5037]: E1126 14:18:02.080136 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:02.580119731 +0000 UTC m=+149.376889905 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.133718 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-n4nhc" podStartSLOduration=127.133696792 podStartE2EDuration="2m7.133696792s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:02.132924682 +0000 UTC m=+148.929694866" watchObservedRunningTime="2025-11-26 14:18:02.133696792 +0000 UTC m=+148.930466976" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.181574 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:02 crc kubenswrapper[5037]: E1126 14:18:02.181929 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:02.681912516 +0000 UTC m=+149.478682700 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.260767 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wb4bw"] Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.263335 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wb4bw" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.279943 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.283854 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9611ba-47f1-43da-92fc-a4f99606500a-utilities\") pod \"certified-operators-wb4bw\" (UID: \"bf9611ba-47f1-43da-92fc-a4f99606500a\") " pod="openshift-marketplace/certified-operators-wb4bw" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.283902 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4fmh\" (UniqueName: \"kubernetes.io/projected/bf9611ba-47f1-43da-92fc-a4f99606500a-kube-api-access-m4fmh\") pod \"certified-operators-wb4bw\" (UID: \"bf9611ba-47f1-43da-92fc-a4f99606500a\") " pod="openshift-marketplace/certified-operators-wb4bw" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.283943 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.283987 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9611ba-47f1-43da-92fc-a4f99606500a-catalog-content\") pod \"certified-operators-wb4bw\" (UID: \"bf9611ba-47f1-43da-92fc-a4f99606500a\") " pod="openshift-marketplace/certified-operators-wb4bw" Nov 26 14:18:02 crc kubenswrapper[5037]: E1126 14:18:02.284443 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:02.784426202 +0000 UTC m=+149.581196386 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.306004 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wb4bw"] Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.403150 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.403582 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9611ba-47f1-43da-92fc-a4f99606500a-utilities\") pod \"certified-operators-wb4bw\" (UID: \"bf9611ba-47f1-43da-92fc-a4f99606500a\") " pod="openshift-marketplace/certified-operators-wb4bw" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.403617 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4fmh\" (UniqueName: \"kubernetes.io/projected/bf9611ba-47f1-43da-92fc-a4f99606500a-kube-api-access-m4fmh\") pod \"certified-operators-wb4bw\" (UID: \"bf9611ba-47f1-43da-92fc-a4f99606500a\") " pod="openshift-marketplace/certified-operators-wb4bw" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.403674 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9611ba-47f1-43da-92fc-a4f99606500a-catalog-content\") pod \"certified-operators-wb4bw\" (UID: \"bf9611ba-47f1-43da-92fc-a4f99606500a\") " pod="openshift-marketplace/certified-operators-wb4bw" Nov 26 14:18:02 crc kubenswrapper[5037]: E1126 14:18:02.404628 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:02.904591414 +0000 UTC m=+149.701361598 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.405074 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9611ba-47f1-43da-92fc-a4f99606500a-utilities\") pod \"certified-operators-wb4bw\" (UID: \"bf9611ba-47f1-43da-92fc-a4f99606500a\") " pod="openshift-marketplace/certified-operators-wb4bw" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.419441 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9611ba-47f1-43da-92fc-a4f99606500a-catalog-content\") pod \"certified-operators-wb4bw\" (UID: \"bf9611ba-47f1-43da-92fc-a4f99606500a\") " pod="openshift-marketplace/certified-operators-wb4bw" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.423402 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-m8gz7"] Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.425150 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m8gz7" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.426591 5037 patch_prober.go:28] interesting pod/router-default-5444994796-gncr9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 14:18:02 crc kubenswrapper[5037]: [-]has-synced failed: reason withheld Nov 26 14:18:02 crc kubenswrapper[5037]: [+]process-running ok Nov 26 14:18:02 crc kubenswrapper[5037]: healthz check failed Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.426645 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gncr9" podUID="73f71d36-826a-4890-8f3f-6f1f3f159d5e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.429803 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.462445 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4fmh\" (UniqueName: \"kubernetes.io/projected/bf9611ba-47f1-43da-92fc-a4f99606500a-kube-api-access-m4fmh\") pod \"certified-operators-wb4bw\" (UID: \"bf9611ba-47f1-43da-92fc-a4f99606500a\") " pod="openshift-marketplace/certified-operators-wb4bw" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.464235 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m8gz7"] Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.506213 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: 
\"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.506297 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmgjr\" (UniqueName: \"kubernetes.io/projected/f379a727-1bc7-469d-8148-b7fb1abb5155-kube-api-access-nmgjr\") pod \"community-operators-m8gz7\" (UID: \"f379a727-1bc7-469d-8148-b7fb1abb5155\") " pod="openshift-marketplace/community-operators-m8gz7" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.506429 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f379a727-1bc7-469d-8148-b7fb1abb5155-catalog-content\") pod \"community-operators-m8gz7\" (UID: \"f379a727-1bc7-469d-8148-b7fb1abb5155\") " pod="openshift-marketplace/community-operators-m8gz7" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.506499 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f379a727-1bc7-469d-8148-b7fb1abb5155-utilities\") pod \"community-operators-m8gz7\" (UID: \"f379a727-1bc7-469d-8148-b7fb1abb5155\") " pod="openshift-marketplace/community-operators-m8gz7" Nov 26 14:18:02 crc kubenswrapper[5037]: E1126 14:18:02.506771 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:03.0067569 +0000 UTC m=+149.803527084 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.610001 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:02 crc kubenswrapper[5037]: E1126 14:18:02.610441 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:03.110404363 +0000 UTC m=+149.907174547 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.610465 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.610508 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmgjr\" (UniqueName: \"kubernetes.io/projected/f379a727-1bc7-469d-8148-b7fb1abb5155-kube-api-access-nmgjr\") pod \"community-operators-m8gz7\" (UID: \"f379a727-1bc7-469d-8148-b7fb1abb5155\") " pod="openshift-marketplace/community-operators-m8gz7" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.610538 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f379a727-1bc7-469d-8148-b7fb1abb5155-catalog-content\") pod \"community-operators-m8gz7\" (UID: \"f379a727-1bc7-469d-8148-b7fb1abb5155\") " pod="openshift-marketplace/community-operators-m8gz7" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.610580 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f379a727-1bc7-469d-8148-b7fb1abb5155-utilities\") pod \"community-operators-m8gz7\" (UID: \"f379a727-1bc7-469d-8148-b7fb1abb5155\") " pod="openshift-marketplace/community-operators-m8gz7" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.611013 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f379a727-1bc7-469d-8148-b7fb1abb5155-utilities\") pod \"community-operators-m8gz7\" (UID: \"f379a727-1bc7-469d-8148-b7fb1abb5155\") " pod="openshift-marketplace/community-operators-m8gz7" Nov 26 14:18:02 crc kubenswrapper[5037]: E1126 14:18:02.611302 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:03.111277496 +0000 UTC m=+149.908047680 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.611903 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f379a727-1bc7-469d-8148-b7fb1abb5155-catalog-content\") pod \"community-operators-m8gz7\" (UID: \"f379a727-1bc7-469d-8148-b7fb1abb5155\") " pod="openshift-marketplace/community-operators-m8gz7" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.626144 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hjjdh"] Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.627182 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hjjdh" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.637529 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmgjr\" (UniqueName: \"kubernetes.io/projected/f379a727-1bc7-469d-8148-b7fb1abb5155-kube-api-access-nmgjr\") pod \"community-operators-m8gz7\" (UID: \"f379a727-1bc7-469d-8148-b7fb1abb5155\") " pod="openshift-marketplace/community-operators-m8gz7" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.642049 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wb4bw" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.650988 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hjjdh"] Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.711604 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.711954 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6c8b303-75aa-4a87-a45c-fd1776689864-utilities\") pod \"certified-operators-hjjdh\" (UID: \"c6c8b303-75aa-4a87-a45c-fd1776689864\") " pod="openshift-marketplace/certified-operators-hjjdh" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.712045 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f5wh\" (UniqueName: \"kubernetes.io/projected/c6c8b303-75aa-4a87-a45c-fd1776689864-kube-api-access-2f5wh\") pod \"certified-operators-hjjdh\" (UID: \"c6c8b303-75aa-4a87-a45c-fd1776689864\") " pod="openshift-marketplace/certified-operators-hjjdh" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.712087 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6c8b303-75aa-4a87-a45c-fd1776689864-catalog-content\") pod \"certified-operators-hjjdh\" (UID: 
\"c6c8b303-75aa-4a87-a45c-fd1776689864\") " pod="openshift-marketplace/certified-operators-hjjdh" Nov 26 14:18:02 crc kubenswrapper[5037]: E1126 14:18:02.712220 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:03.21219784 +0000 UTC m=+150.008968024 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.774489 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr" event={"ID":"81241e6e-dcc2-4509-8e40-fd330f57a15b","Type":"ContainerStarted","Data":"3639afd42b24b9afdd61906d61f969ae612bfb2f835cbaf7db12a114f39c1ec1"} Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.804621 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kj5z5" event={"ID":"29912f3d-383e-4361-b882-48ab47cecb56","Type":"ContainerStarted","Data":"8ed1804225003082bbae7008c37cb9873a862b7de956e550ad8404a32c13ca6a"} Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.809577 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m8gz7" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.812720 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.812836 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f5wh\" (UniqueName: \"kubernetes.io/projected/c6c8b303-75aa-4a87-a45c-fd1776689864-kube-api-access-2f5wh\") pod \"certified-operators-hjjdh\" (UID: \"c6c8b303-75aa-4a87-a45c-fd1776689864\") " pod="openshift-marketplace/certified-operators-hjjdh" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.812894 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6c8b303-75aa-4a87-a45c-fd1776689864-catalog-content\") pod \"certified-operators-hjjdh\" (UID: \"c6c8b303-75aa-4a87-a45c-fd1776689864\") " pod="openshift-marketplace/certified-operators-hjjdh" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.812946 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6c8b303-75aa-4a87-a45c-fd1776689864-utilities\") pod \"certified-operators-hjjdh\" (UID: \"c6c8b303-75aa-4a87-a45c-fd1776689864\") " pod="openshift-marketplace/certified-operators-hjjdh" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.813361 5037 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6c8b303-75aa-4a87-a45c-fd1776689864-utilities\") pod \"certified-operators-hjjdh\" (UID: \"c6c8b303-75aa-4a87-a45c-fd1776689864\") " pod="openshift-marketplace/certified-operators-hjjdh" Nov 26 14:18:02 crc kubenswrapper[5037]: E1126 14:18:02.813660 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:03.313646498 +0000 UTC m=+150.110416682 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.815740 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6c8b303-75aa-4a87-a45c-fd1776689864-catalog-content\") pod \"certified-operators-hjjdh\" (UID: \"c6c8b303-75aa-4a87-a45c-fd1776689864\") " pod="openshift-marketplace/certified-operators-hjjdh" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.841476 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-smcps"] Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.842388 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-smcps" Nov 26 14:18:02 crc kubenswrapper[5037]: W1126 14:18:02.842745 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-5cecd36a7fcab3add20e753e33fc96b108e0fc2f54e2cf17d9535690604a3b1b WatchSource:0}: Error finding container 5cecd36a7fcab3add20e753e33fc96b108e0fc2f54e2cf17d9535690604a3b1b: Status 404 returned error can't find the container with id 5cecd36a7fcab3add20e753e33fc96b108e0fc2f54e2cf17d9535690604a3b1b Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.851988 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-7w8nb" event={"ID":"a2386288-e064-42f1-aac0-5866f0179542","Type":"ContainerStarted","Data":"2982836c675d1ca916d9bb38d32f560ebd25b2d8278d78ef35bea51b6b950987"} Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.865772 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-smcps"] Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.916041 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:02 crc kubenswrapper[5037]: E1126 14:18:02.916295 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:03.416248014 +0000 UTC m=+150.213018188 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.916516 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/354f57d7-61fc-44f1-ab03-d7bab0a6a984-utilities\") pod \"community-operators-smcps\" (UID: \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\") " pod="openshift-marketplace/community-operators-smcps" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.916694 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/354f57d7-61fc-44f1-ab03-d7bab0a6a984-catalog-content\") pod \"community-operators-smcps\" (UID: \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\") " pod="openshift-marketplace/community-operators-smcps" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.916817 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.916853 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtk8x\" (UniqueName: \"kubernetes.io/projected/354f57d7-61fc-44f1-ab03-d7bab0a6a984-kube-api-access-gtk8x\") pod \"community-operators-smcps\" (UID: \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\") " pod="openshift-marketplace/community-operators-smcps" Nov 26 14:18:02 crc kubenswrapper[5037]: E1126 14:18:02.918320 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:03.418298806 +0000 UTC m=+150.215068990 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.919551 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f5wh\" (UniqueName: \"kubernetes.io/projected/c6c8b303-75aa-4a87-a45c-fd1776689864-kube-api-access-2f5wh\") pod \"certified-operators-hjjdh\" (UID: \"c6c8b303-75aa-4a87-a45c-fd1776689864\") " pod="openshift-marketplace/certified-operators-hjjdh" Nov 26 14:18:02 crc kubenswrapper[5037]: W1126 14:18:02.931317 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-0320ad9f33cdfd63730ac5fa871b6cad3c76d37b5cd1e82bda0e32ebddf23833 WatchSource:0}: Error finding container 0320ad9f33cdfd63730ac5fa871b6cad3c76d37b5cd1e82bda0e32ebddf23833: Status 404 returned error can't find the container with id 0320ad9f33cdfd63730ac5fa871b6cad3c76d37b5cd1e82bda0e32ebddf23833 Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.941767 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" event={"ID":"f9e69f1b-47a1-4ea5-9d69-b79bf401810a","Type":"ContainerStarted","Data":"6a85e23814cae34b93fde96073c960c0b1a25d37f4e2a016dc8b61b2a3a2106e"} Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.953721 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"6109bc14a982ca6c63411dbbd668b34871c4c34325655c4b7edc1c0bdee25b2b"} Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.957066 5037 patch_prober.go:28] interesting pod/downloads-7954f5f757-vwp8j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.957133 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vwp8j" podUID="9dfb8a84-f022-4823-b563-5800b665b32f" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.975155 5037 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-txjgw container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Nov 26 14:18:02 crc kubenswrapper[5037]: I1126 14:18:02.975214 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" podUID="8b2f4cca-09b7-44dc-9458-298b0e3c8507" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Nov 26 14:18:02 crc kubenswrapper[5037]: 
I1126 14:18:02.976751 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hjjdh" Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.017387 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.017628 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/354f57d7-61fc-44f1-ab03-d7bab0a6a984-utilities\") pod \"community-operators-smcps\" (UID: \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\") " pod="openshift-marketplace/community-operators-smcps" Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.017671 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/354f57d7-61fc-44f1-ab03-d7bab0a6a984-catalog-content\") pod \"community-operators-smcps\" (UID: \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\") " pod="openshift-marketplace/community-operators-smcps" Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.017705 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtk8x\" (UniqueName: \"kubernetes.io/projected/354f57d7-61fc-44f1-ab03-d7bab0a6a984-kube-api-access-gtk8x\") pod \"community-operators-smcps\" (UID: \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\") " pod="openshift-marketplace/community-operators-smcps" Nov 26 14:18:03 crc kubenswrapper[5037]: E1126 14:18:03.019224 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:03.519208479 +0000 UTC m=+150.315978663 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.019615 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/354f57d7-61fc-44f1-ab03-d7bab0a6a984-utilities\") pod \"community-operators-smcps\" (UID: \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\") " pod="openshift-marketplace/community-operators-smcps" Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.019957 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/354f57d7-61fc-44f1-ab03-d7bab0a6a984-catalog-content\") pod \"community-operators-smcps\" (UID: \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\") " pod="openshift-marketplace/community-operators-smcps" Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.077183 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtk8x\" (UniqueName: \"kubernetes.io/projected/354f57d7-61fc-44f1-ab03-d7bab0a6a984-kube-api-access-gtk8x\") pod \"community-operators-smcps\" (UID: \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\") " pod="openshift-marketplace/community-operators-smcps" Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.125940 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:03 crc kubenswrapper[5037]: E1126 14:18:03.143687 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:03.643668342 +0000 UTC m=+150.440438526 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.230038 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:03 crc kubenswrapper[5037]: E1126 14:18:03.231998 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:03.731975135 +0000 UTC m=+150.528745319 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.274975 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-smcps" Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.336303 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:03 crc kubenswrapper[5037]: E1126 14:18:03.336763 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:03.836750857 +0000 UTC m=+150.633521031 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.369877 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wb4bw"]
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.439127 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:18:03 crc kubenswrapper[5037]: E1126 14:18:03.439637 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:03.939613551 +0000 UTC m=+150.736383735 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.445956 5037 patch_prober.go:28] interesting pod/router-default-5444994796-gncr9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 14:18:03 crc kubenswrapper[5037]: [-]has-synced failed: reason withheld
Nov 26 14:18:03 crc kubenswrapper[5037]: [+]process-running ok
Nov 26 14:18:03 crc kubenswrapper[5037]: healthz check failed
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.446028 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gncr9" podUID="73f71d36-826a-4890-8f3f-6f1f3f159d5e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.532222 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m8gz7"]
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.543941 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:18:03 crc kubenswrapper[5037]: E1126 14:18:03.544357 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.044340762 +0000 UTC m=+150.841110946 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:03 crc kubenswrapper[5037]: W1126 14:18:03.559196 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf379a727_1bc7_469d_8148_b7fb1abb5155.slice/crio-7af3924c559a9f71ac43053c217ae871caef7850c211e83d9b23e7060d147b84 WatchSource:0}: Error finding container 7af3924c559a9f71ac43053c217ae871caef7850c211e83d9b23e7060d147b84: Status 404 returned error can't find the container with id 7af3924c559a9f71ac43053c217ae871caef7850c211e83d9b23e7060d147b84
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.568639 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj"
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.650071 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:18:03 crc kubenswrapper[5037]: E1126 14:18:03.651617 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.151583836 +0000 UTC m=+150.948354020 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.656831 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:18:03 crc kubenswrapper[5037]: E1126 14:18:03.658667 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.158649376 +0000 UTC m=+150.955419560 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.743511 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hjjdh"]
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.758591 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:18:03 crc kubenswrapper[5037]: E1126 14:18:03.758937 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.258898903 +0000 UTC m=+151.055669087 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.759082 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:18:03 crc kubenswrapper[5037]: E1126 14:18:03.759636 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.259629041 +0000 UTC m=+151.056399225 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.860782 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:18:03 crc kubenswrapper[5037]: E1126 14:18:03.861106 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.361089749 +0000 UTC m=+151.157859933 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.963861 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"769180eb58c075da504093ff99ca0f1ef36efde383d86257f9573b53e29d040e"}
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.964178 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"5cecd36a7fcab3add20e753e33fc96b108e0fc2f54e2cf17d9535690604a3b1b"}
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.964957 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:18:03 crc kubenswrapper[5037]: E1126 14:18:03.965370 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.465353248 +0000 UTC m=+151.262123432 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.976150 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m8gz7" event={"ID":"f379a727-1bc7-469d-8148-b7fb1abb5155","Type":"ContainerStarted","Data":"7af3924c559a9f71ac43053c217ae871caef7850c211e83d9b23e7060d147b84"}
Nov 26 14:18:03 crc kubenswrapper[5037]: I1126 14:18:03.989140 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" event={"ID":"f9e69f1b-47a1-4ea5-9d69-b79bf401810a","Type":"ContainerStarted","Data":"17a1eebcd97caa7de0726aea7e38fab317ebb8409aa53246517005354830e87d"}
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.035183 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"b45292f04d37f812ea0f01cd5728b528b7bf79598076e015a38f80aea6cc738f"}
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.044394 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"0320ad9f33cdfd63730ac5fa871b6cad3c76d37b5cd1e82bda0e32ebddf23833"}
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.054368 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-smcps"]
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.062572 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wb4bw" event={"ID":"bf9611ba-47f1-43da-92fc-a4f99606500a","Type":"ContainerStarted","Data":"628d5830c87c62fc4bd330045026bdc2a2a64c026696f3ef8d264e175faf0f5a"}
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.068362 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hjjdh" event={"ID":"c6c8b303-75aa-4a87-a45c-fd1776689864","Type":"ContainerStarted","Data":"09e9589fa94c0e614de6d2520f494f10e74f6906029e9fc8d452556d290e84f1"}
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.065962 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.066048 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.566024206 +0000 UTC m=+151.362794390 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.069070 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.070439 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.570417777 +0000 UTC m=+151.367187961 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.178863 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.180747 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.68071929 +0000 UTC m=+151.477489484 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.220246 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-msn7s"]
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.228611 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.236078 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.242352 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-msn7s"]
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.281570 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e33b5e20-dd02-4850-b59b-40a271de1b3f-catalog-content\") pod \"redhat-marketplace-msn7s\" (UID: \"e33b5e20-dd02-4850-b59b-40a271de1b3f\") " pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.281620 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e33b5e20-dd02-4850-b59b-40a271de1b3f-utilities\") pod \"redhat-marketplace-msn7s\" (UID: \"e33b5e20-dd02-4850-b59b-40a271de1b3f\") " pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.281661 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxksz\" (UniqueName: \"kubernetes.io/projected/e33b5e20-dd02-4850-b59b-40a271de1b3f-kube-api-access-jxksz\") pod \"redhat-marketplace-msn7s\" (UID: \"e33b5e20-dd02-4850-b59b-40a271de1b3f\") " pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.281701 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.282093 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.782077195 +0000 UTC m=+151.578847379 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.341620 5037 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.385313 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.385396 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.885363589 +0000 UTC m=+151.682133773 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.386489 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e33b5e20-dd02-4850-b59b-40a271de1b3f-catalog-content\") pod \"redhat-marketplace-msn7s\" (UID: \"e33b5e20-dd02-4850-b59b-40a271de1b3f\") " pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.386603 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e33b5e20-dd02-4850-b59b-40a271de1b3f-utilities\") pod \"redhat-marketplace-msn7s\" (UID: \"e33b5e20-dd02-4850-b59b-40a271de1b3f\") " pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.386860 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxksz\" (UniqueName: \"kubernetes.io/projected/e33b5e20-dd02-4850-b59b-40a271de1b3f-kube-api-access-jxksz\") pod \"redhat-marketplace-msn7s\" (UID: \"e33b5e20-dd02-4850-b59b-40a271de1b3f\") " pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.387191 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.387600 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.887577156 +0000 UTC m=+151.684347340 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.387822 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e33b5e20-dd02-4850-b59b-40a271de1b3f-catalog-content\") pod \"redhat-marketplace-msn7s\" (UID: \"e33b5e20-dd02-4850-b59b-40a271de1b3f\") " pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.389040 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e33b5e20-dd02-4850-b59b-40a271de1b3f-utilities\") pod \"redhat-marketplace-msn7s\" (UID: \"e33b5e20-dd02-4850-b59b-40a271de1b3f\") " pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.409525 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxksz\" (UniqueName: \"kubernetes.io/projected/e33b5e20-dd02-4850-b59b-40a271de1b3f-kube-api-access-jxksz\") pod \"redhat-marketplace-msn7s\" (UID: \"e33b5e20-dd02-4850-b59b-40a271de1b3f\") " pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.423940 5037 patch_prober.go:28] interesting pod/router-default-5444994796-gncr9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 14:18:04 crc kubenswrapper[5037]: [-]has-synced failed: reason withheld
Nov 26 14:18:04 crc kubenswrapper[5037]: [+]process-running ok
Nov 26 14:18:04 crc kubenswrapper[5037]: healthz check failed
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.424016 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gncr9" podUID="73f71d36-826a-4890-8f3f-6f1f3f159d5e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.488975 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.489482 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.989435754 +0000 UTC m=+151.786205938 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.489804 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.490213 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:04.990195333 +0000 UTC m=+151.786965517 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.591427 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.592034 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:05.09201335 +0000 UTC m=+151.888783534 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.592362 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.592736 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:05.092722627 +0000 UTC m=+151.889492811 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.615244 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wsj5x"]
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.616667 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.630737 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wsj5x"]
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.634699 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-d5k2g"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.634738 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-d5k2g"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.646081 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-d5k2g"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.654598 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.693893 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.694057 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:05.194032842 +0000 UTC m=+151.990803036 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.696098 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.696246 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/707b088e-aa31-4988-8677-6dcac9117725-catalog-content\") pod \"redhat-marketplace-wsj5x\" (UID: \"707b088e-aa31-4988-8677-6dcac9117725\") " pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.696445 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fb4kz\" (UniqueName: \"kubernetes.io/projected/707b088e-aa31-4988-8677-6dcac9117725-kube-api-access-fb4kz\") pod \"redhat-marketplace-wsj5x\" (UID: \"707b088e-aa31-4988-8677-6dcac9117725\") " pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.696612 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/707b088e-aa31-4988-8677-6dcac9117725-utilities\") pod \"redhat-marketplace-wsj5x\" (UID: \"707b088e-aa31-4988-8677-6dcac9117725\") " pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.698076 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:05.198061024 +0000 UTC m=+151.994831208 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.801763 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.802430 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/707b088e-aa31-4988-8677-6dcac9117725-catalog-content\") pod \"redhat-marketplace-wsj5x\" (UID: \"707b088e-aa31-4988-8677-6dcac9117725\") " pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.802474 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fb4kz\" (UniqueName: \"kubernetes.io/projected/707b088e-aa31-4988-8677-6dcac9117725-kube-api-access-fb4kz\") pod \"redhat-marketplace-wsj5x\" (UID: \"707b088e-aa31-4988-8677-6dcac9117725\") " pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.802522 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/707b088e-aa31-4988-8677-6dcac9117725-utilities\") pod \"redhat-marketplace-wsj5x\" (UID: \"707b088e-aa31-4988-8677-6dcac9117725\") " pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.802772 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:05.302736633 +0000 UTC m=+152.099506817 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.803525 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/707b088e-aa31-4988-8677-6dcac9117725-utilities\") pod \"redhat-marketplace-wsj5x\" (UID: \"707b088e-aa31-4988-8677-6dcac9117725\") " pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.803800 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/707b088e-aa31-4988-8677-6dcac9117725-catalog-content\") pod \"redhat-marketplace-wsj5x\" (UID: \"707b088e-aa31-4988-8677-6dcac9117725\") " pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.834533 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fb4kz\" (UniqueName: \"kubernetes.io/projected/707b088e-aa31-4988-8677-6dcac9117725-kube-api-access-fb4kz\") pod \"redhat-marketplace-wsj5x\" (UID: \"707b088e-aa31-4988-8677-6dcac9117725\") " pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.906268 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:18:04 crc kubenswrapper[5037]: E1126 14:18:04.906724 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:05.406702645 +0000 UTC m=+152.203472829 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 14:18:04 crc kubenswrapper[5037]: I1126 14:18:04.935811 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wsj5x"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wsj5x" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.007265 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:05 crc kubenswrapper[5037]: E1126 14:18:05.007719 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 14:18:05.507692901 +0000 UTC m=+152.304463095 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.085798 5037 generic.go:334] "Generic (PLEG): container finished" podID="da42804b-0fa3-43ee-9566-296c28b8052f" containerID="e0b94887a7ceb773846edbc19d5674bf4b5cd5e32774aaa15c6c7aa979d9bd40" exitCode=0 Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.086371 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" event={"ID":"da42804b-0fa3-43ee-9566-296c28b8052f","Type":"ContainerDied","Data":"e0b94887a7ceb773846edbc19d5674bf4b5cd5e32774aaa15c6c7aa979d9bd40"} Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.089381 5037 generic.go:334] "Generic (PLEG): container finished" podID="c6c8b303-75aa-4a87-a45c-fd1776689864" containerID="23f84bf55da11b202e24899abf62e8650c151a7464afe07163b25f61edf5d5d8" exitCode=0 Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.089459 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hjjdh" event={"ID":"c6c8b303-75aa-4a87-a45c-fd1776689864","Type":"ContainerDied","Data":"23f84bf55da11b202e24899abf62e8650c151a7464afe07163b25f61edf5d5d8"} Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.091790 5037 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.099363 5037 generic.go:334] "Generic (PLEG): container finished" podID="f379a727-1bc7-469d-8148-b7fb1abb5155" containerID="8c444b1d0fc3bc572c83aa6b76e8d19b5f1c58d235ef6db0662ae427b82707c7" exitCode=0 Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.101461 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m8gz7" event={"ID":"f379a727-1bc7-469d-8148-b7fb1abb5155","Type":"ContainerDied","Data":"8c444b1d0fc3bc572c83aa6b76e8d19b5f1c58d235ef6db0662ae427b82707c7"} Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.103706 5037 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-26T14:18:04.341657669Z","Handler":null,"Name":""} Nov 26 14:18:05 
crc kubenswrapper[5037]: I1126 14:18:05.109665 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:05 crc kubenswrapper[5037]: E1126 14:18:05.110027 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 14:18:05.61000675 +0000 UTC m=+152.406776934 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-nwzvj" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.125612 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-msn7s"] Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.133243 5037 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.133310 5037 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.144245 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-smcps" event={"ID":"354f57d7-61fc-44f1-ab03-d7bab0a6a984","Type":"ContainerDied","Data":"9be2f0a076ae619178c392e44b984208322a662769d0bd37f8c81cbc2ca07572"} Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.151385 5037 generic.go:334] "Generic (PLEG): container finished" podID="354f57d7-61fc-44f1-ab03-d7bab0a6a984" containerID="9be2f0a076ae619178c392e44b984208322a662769d0bd37f8c81cbc2ca07572" exitCode=0 Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.151607 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-smcps" event={"ID":"354f57d7-61fc-44f1-ab03-d7bab0a6a984","Type":"ContainerStarted","Data":"a08713f1aee66f4b925ad035829039ea516e9f93d92c2a3cc2455b684b61129d"} Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.202825 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" event={"ID":"f9e69f1b-47a1-4ea5-9d69-b79bf401810a","Type":"ContainerStarted","Data":"5f429978d91de29880521b44876b96aa480b464c6c47f2206c93220245aa58b7"} Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.202905 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" event={"ID":"f9e69f1b-47a1-4ea5-9d69-b79bf401810a","Type":"ContainerStarted","Data":"3c767510af2e946074d30b6d649b032d06a995bc4b55346a94dab95c0e1ee0e0"} Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.211582 5037 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.217999 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wsj5x"] Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.222788 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.230601 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"396ef04c5b18047bf180319ab4a5ec23a5be1362224927d5e52e093e0163e1b6"} Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.239682 5037 generic.go:334] "Generic (PLEG): container finished" podID="bf9611ba-47f1-43da-92fc-a4f99606500a" containerID="2c488f9ae65e5c372a57d9f3c5e80d614a49e0fedfa12815b5ba8daebd325eea" exitCode=0 Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.239786 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wb4bw" event={"ID":"bf9611ba-47f1-43da-92fc-a4f99606500a","Type":"ContainerDied","Data":"2c488f9ae65e5c372a57d9f3c5e80d614a49e0fedfa12815b5ba8daebd325eea"} Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.241073 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.244556 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-zz4kv" podStartSLOduration=12.244541018 podStartE2EDuration="12.244541018s" podCreationTimestamp="2025-11-26 14:17:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:05.24268613 +0000 UTC m=+152.039456314" watchObservedRunningTime="2025-11-26 14:18:05.244541018 +0000 UTC m=+152.041311202" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.246940 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-d5k2g" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.312903 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.326364 5037 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.326411 5037 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.407863 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-nwzvj\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.419643 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b5sj7"] Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.421690 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5sj7" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.428985 5037 patch_prober.go:28] interesting pod/router-default-5444994796-gncr9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 14:18:05 crc kubenswrapper[5037]: [-]has-synced failed: reason withheld Nov 26 14:18:05 crc kubenswrapper[5037]: [+]process-running ok Nov 26 14:18:05 crc kubenswrapper[5037]: healthz check failed Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.429071 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gncr9" podUID="73f71d36-826a-4890-8f3f-6f1f3f159d5e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.432766 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.440256 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b5sj7"] Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.517983 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/570f926c-8f52-4b77-a139-bfa9d3b61071-utilities\") pod \"redhat-operators-b5sj7\" (UID: \"570f926c-8f52-4b77-a139-bfa9d3b61071\") " pod="openshift-marketplace/redhat-operators-b5sj7" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.518061 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8qwc\" (UniqueName: \"kubernetes.io/projected/570f926c-8f52-4b77-a139-bfa9d3b61071-kube-api-access-s8qwc\") pod \"redhat-operators-b5sj7\" (UID: \"570f926c-8f52-4b77-a139-bfa9d3b61071\") " pod="openshift-marketplace/redhat-operators-b5sj7" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.518275 5037 reconciler_common.go:245] 
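Note the csi_attacher.go line at 14:18:05.326364 above: once the driver is registered, MountDevice succeeds immediately because the hostpath driver does not advertise the STAGE_UNSTAGE_VOLUME node capability, so there is no NodeStageVolume call to make and the kubelet proceeds straight to the per-pod publish (the MountVolume.SetUp that succeeds at 14:18:05.407863). A sketch of that branch, with hypothetical names (the real code learns the capability from the driver's NodeGetCapabilities response over gRPC):

package main

import "fmt"

// mountDevice sketches the attacher's decision: without STAGE_UNSTAGE_VOLUME
// there is nothing to stage, so MountDevice is recorded as succeeded at once.
func mountDevice(stageUnstageSupported bool, nodeStageVolume func() error) error {
	if !stageUnstageSupported {
		fmt.Println("STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")
		return nil // proceed straight to NodePublishVolume (MountVolume.SetUp)
	}
	// Otherwise stage the device at the global mount path
	// (.../plugins/kubernetes.io/csi/<driver>/<hash>/globalmount).
	return nodeStageVolume()
}

func main() {
	_ = mountDevice(false, func() error { return nil })
}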
"operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/570f926c-8f52-4b77-a139-bfa9d3b61071-catalog-content\") pod \"redhat-operators-b5sj7\" (UID: \"570f926c-8f52-4b77-a139-bfa9d3b61071\") " pod="openshift-marketplace/redhat-operators-b5sj7" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.619715 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/570f926c-8f52-4b77-a139-bfa9d3b61071-utilities\") pod \"redhat-operators-b5sj7\" (UID: \"570f926c-8f52-4b77-a139-bfa9d3b61071\") " pod="openshift-marketplace/redhat-operators-b5sj7" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.619782 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8qwc\" (UniqueName: \"kubernetes.io/projected/570f926c-8f52-4b77-a139-bfa9d3b61071-kube-api-access-s8qwc\") pod \"redhat-operators-b5sj7\" (UID: \"570f926c-8f52-4b77-a139-bfa9d3b61071\") " pod="openshift-marketplace/redhat-operators-b5sj7" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.619828 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/570f926c-8f52-4b77-a139-bfa9d3b61071-catalog-content\") pod \"redhat-operators-b5sj7\" (UID: \"570f926c-8f52-4b77-a139-bfa9d3b61071\") " pod="openshift-marketplace/redhat-operators-b5sj7" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.620591 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/570f926c-8f52-4b77-a139-bfa9d3b61071-catalog-content\") pod \"redhat-operators-b5sj7\" (UID: \"570f926c-8f52-4b77-a139-bfa9d3b61071\") " pod="openshift-marketplace/redhat-operators-b5sj7" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.620598 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/570f926c-8f52-4b77-a139-bfa9d3b61071-utilities\") pod \"redhat-operators-b5sj7\" (UID: \"570f926c-8f52-4b77-a139-bfa9d3b61071\") " pod="openshift-marketplace/redhat-operators-b5sj7" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.642525 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8qwc\" (UniqueName: \"kubernetes.io/projected/570f926c-8f52-4b77-a139-bfa9d3b61071-kube-api-access-s8qwc\") pod \"redhat-operators-b5sj7\" (UID: \"570f926c-8f52-4b77-a139-bfa9d3b61071\") " pod="openshift-marketplace/redhat-operators-b5sj7" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.650248 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.828962 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-np59v"] Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.830450 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-np59v" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.837213 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b5sj7" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.874236 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-np59v"] Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.925431 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcdvd\" (UniqueName: \"kubernetes.io/projected/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-kube-api-access-dcdvd\") pod \"redhat-operators-np59v\" (UID: \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\") " pod="openshift-marketplace/redhat-operators-np59v" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.925478 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-utilities\") pod \"redhat-operators-np59v\" (UID: \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\") " pod="openshift-marketplace/redhat-operators-np59v" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.925510 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-catalog-content\") pod \"redhat-operators-np59v\" (UID: \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\") " pod="openshift-marketplace/redhat-operators-np59v" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.929110 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.934607 5037 patch_prober.go:28] interesting pod/downloads-7954f5f757-vwp8j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.934664 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vwp8j" podUID="9dfb8a84-f022-4823-b563-5800b665b32f" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.934927 5037 patch_prober.go:28] interesting pod/downloads-7954f5f757-vwp8j container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.935014 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vwp8j" podUID="9dfb8a84-f022-4823-b563-5800b665b32f" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.958259 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.958408 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.960468 
5037 patch_prober.go:28] interesting pod/console-f9d7485db-qfdqh container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.9:8443/health\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Nov 26 14:18:05 crc kubenswrapper[5037]: I1126 14:18:05.960560 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-qfdqh" podUID="25030986-5796-4784-accd-c465c7c2daa3" containerName="console" probeResult="failure" output="Get \"https://10.217.0.9:8443/health\": dial tcp 10.217.0.9:8443: connect: connection refused" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.027937 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-catalog-content\") pod \"redhat-operators-np59v\" (UID: \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\") " pod="openshift-marketplace/redhat-operators-np59v" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.028125 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcdvd\" (UniqueName: \"kubernetes.io/projected/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-kube-api-access-dcdvd\") pod \"redhat-operators-np59v\" (UID: \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\") " pod="openshift-marketplace/redhat-operators-np59v" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.028149 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-utilities\") pod \"redhat-operators-np59v\" (UID: \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\") " pod="openshift-marketplace/redhat-operators-np59v" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.028633 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-catalog-content\") pod \"redhat-operators-np59v\" (UID: \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\") " pod="openshift-marketplace/redhat-operators-np59v" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.028646 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-utilities\") pod \"redhat-operators-np59v\" (UID: \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\") " pod="openshift-marketplace/redhat-operators-np59v" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.052091 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-nwzvj"] Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.054504 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcdvd\" (UniqueName: \"kubernetes.io/projected/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-kube-api-access-dcdvd\") pod \"redhat-operators-np59v\" (UID: \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\") " pod="openshift-marketplace/redhat-operators-np59v" Nov 26 14:18:06 crc kubenswrapper[5037]: W1126 14:18:06.084968 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd96f84a_d233_4a35_9c2e_5b7f3c8c8fbb.slice/crio-2c7dad4dd4ecbd8ecad7e4a8fda1da0a3998665191422d49bde2d4500a066460 WatchSource:0}: Error finding container 2c7dad4dd4ecbd8ecad7e4a8fda1da0a3998665191422d49bde2d4500a066460: Status 404 
returned error can't find the container with id 2c7dad4dd4ecbd8ecad7e4a8fda1da0a3998665191422d49bde2d4500a066460 Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.159411 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b5sj7"] Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.182585 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-np59v" Nov 26 14:18:06 crc kubenswrapper[5037]: W1126 14:18:06.190491 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod570f926c_8f52_4b77_a139_bfa9d3b61071.slice/crio-3c5a6c768ffc48858417cffe4d1be10bc164e0481ebac1ff633ca442a058c50a WatchSource:0}: Error finding container 3c5a6c768ffc48858417cffe4d1be10bc164e0481ebac1ff633ca442a058c50a: Status 404 returned error can't find the container with id 3c5a6c768ffc48858417cffe4d1be10bc164e0481ebac1ff633ca442a058c50a Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.255470 5037 generic.go:334] "Generic (PLEG): container finished" podID="e33b5e20-dd02-4850-b59b-40a271de1b3f" containerID="4e28fcc51c73fd80e4d786e7b960aaa63f4745e8ca7d491f35092746d37a238e" exitCode=0 Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.255518 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msn7s" event={"ID":"e33b5e20-dd02-4850-b59b-40a271de1b3f","Type":"ContainerDied","Data":"4e28fcc51c73fd80e4d786e7b960aaa63f4745e8ca7d491f35092746d37a238e"} Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.255561 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msn7s" event={"ID":"e33b5e20-dd02-4850-b59b-40a271de1b3f","Type":"ContainerStarted","Data":"d7afab889d9a341f2b766b14f2ef82f0ef1b041be7aa8674e9e091f3a86195d3"} Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.257864 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" event={"ID":"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb","Type":"ContainerStarted","Data":"2c7dad4dd4ecbd8ecad7e4a8fda1da0a3998665191422d49bde2d4500a066460"} Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.263531 5037 generic.go:334] "Generic (PLEG): container finished" podID="707b088e-aa31-4988-8677-6dcac9117725" containerID="beda4050355ce07612aa16319836730cd34f5144da51fd3a490c0809545b5431" exitCode=0 Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.264009 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wsj5x" event={"ID":"707b088e-aa31-4988-8677-6dcac9117725","Type":"ContainerDied","Data":"beda4050355ce07612aa16319836730cd34f5144da51fd3a490c0809545b5431"} Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.264076 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wsj5x" event={"ID":"707b088e-aa31-4988-8677-6dcac9117725","Type":"ContainerStarted","Data":"7ae9e78d00273155d3e7bece699f60eb47e1432d17cfb6ad0ede43405ddb085e"} Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.268992 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5sj7" event={"ID":"570f926c-8f52-4b77-a139-bfa9d3b61071","Type":"ContainerStarted","Data":"3c5a6c768ffc48858417cffe4d1be10bc164e0481ebac1ff633ca442a058c50a"} Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.426273 5037 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.430889 5037 patch_prober.go:28] interesting pod/router-default-5444994796-gncr9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 14:18:06 crc kubenswrapper[5037]: [-]has-synced failed: reason withheld Nov 26 14:18:06 crc kubenswrapper[5037]: [+]process-running ok Nov 26 14:18:06 crc kubenswrapper[5037]: healthz check failed Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.430946 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gncr9" podUID="73f71d36-826a-4890-8f3f-6f1f3f159d5e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.433245 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.540231 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.565510 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-np59v"] Nov 26 14:18:06 crc kubenswrapper[5037]: W1126 14:18:06.583814 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda9e08d30_22dd_4fc2_ab4f_a742e2e9c3d8.slice/crio-41704a4c1a34935b2fb8791ff052d4d912aee0f299eb6e5f74bcde7071c958e5 WatchSource:0}: Error finding container 41704a4c1a34935b2fb8791ff052d4d912aee0f299eb6e5f74bcde7071c958e5: Status 404 returned error can't find the container with id 41704a4c1a34935b2fb8791ff052d4d912aee0f299eb6e5f74bcde7071c958e5 Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.644300 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj7zm\" (UniqueName: \"kubernetes.io/projected/da42804b-0fa3-43ee-9566-296c28b8052f-kube-api-access-pj7zm\") pod \"da42804b-0fa3-43ee-9566-296c28b8052f\" (UID: \"da42804b-0fa3-43ee-9566-296c28b8052f\") " Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.644384 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/da42804b-0fa3-43ee-9566-296c28b8052f-secret-volume\") pod \"da42804b-0fa3-43ee-9566-296c28b8052f\" (UID: \"da42804b-0fa3-43ee-9566-296c28b8052f\") " Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.644514 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/da42804b-0fa3-43ee-9566-296c28b8052f-config-volume\") pod \"da42804b-0fa3-43ee-9566-296c28b8052f\" (UID: \"da42804b-0fa3-43ee-9566-296c28b8052f\") " Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.645808 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da42804b-0fa3-43ee-9566-296c28b8052f-config-volume" (OuterVolumeSpecName: "config-volume") pod "da42804b-0fa3-43ee-9566-296c28b8052f" (UID: "da42804b-0fa3-43ee-9566-296c28b8052f"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.659447 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da42804b-0fa3-43ee-9566-296c28b8052f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "da42804b-0fa3-43ee-9566-296c28b8052f" (UID: "da42804b-0fa3-43ee-9566-296c28b8052f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.662766 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da42804b-0fa3-43ee-9566-296c28b8052f-kube-api-access-pj7zm" (OuterVolumeSpecName: "kube-api-access-pj7zm") pod "da42804b-0fa3-43ee-9566-296c28b8052f" (UID: "da42804b-0fa3-43ee-9566-296c28b8052f"). InnerVolumeSpecName "kube-api-access-pj7zm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.746745 5037 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/da42804b-0fa3-43ee-9566-296c28b8052f-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.746809 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj7zm\" (UniqueName: \"kubernetes.io/projected/da42804b-0fa3-43ee-9566-296c28b8052f-kube-api-access-pj7zm\") on node \"crc\" DevicePath \"\"" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.746819 5037 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/da42804b-0fa3-43ee-9566-296c28b8052f-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.856099 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 14:18:06 crc kubenswrapper[5037]: E1126 14:18:06.856384 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da42804b-0fa3-43ee-9566-296c28b8052f" containerName="collect-profiles" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.856397 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="da42804b-0fa3-43ee-9566-296c28b8052f" containerName="collect-profiles" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.856485 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="da42804b-0fa3-43ee-9566-296c28b8052f" containerName="collect-profiles" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.859208 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.862969 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.865921 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.874065 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.951735 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aa1cfda0-0f53-494b-beb2-8ec2c81fa533-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"aa1cfda0-0f53-494b-beb2-8ec2c81fa533\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 14:18:06 crc kubenswrapper[5037]: I1126 14:18:06.952239 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aa1cfda0-0f53-494b-beb2-8ec2c81fa533-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"aa1cfda0-0f53-494b-beb2-8ec2c81fa533\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.054948 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aa1cfda0-0f53-494b-beb2-8ec2c81fa533-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"aa1cfda0-0f53-494b-beb2-8ec2c81fa533\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.055040 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aa1cfda0-0f53-494b-beb2-8ec2c81fa533-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"aa1cfda0-0f53-494b-beb2-8ec2c81fa533\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.055114 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aa1cfda0-0f53-494b-beb2-8ec2c81fa533-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"aa1cfda0-0f53-494b-beb2-8ec2c81fa533\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.084433 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aa1cfda0-0f53-494b-beb2-8ec2c81fa533-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"aa1cfda0-0f53-494b-beb2-8ec2c81fa533\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.226759 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.299264 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" event={"ID":"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb","Type":"ContainerStarted","Data":"ea5c494f720ea12b84450a4cac184760c05ae2899c834297277760cdcc4d86dd"} Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.300265 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.343253 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" podStartSLOduration=132.343226179 podStartE2EDuration="2m12.343226179s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:07.341156706 +0000 UTC m=+154.137926890" watchObservedRunningTime="2025-11-26 14:18:07.343226179 +0000 UTC m=+154.139996363" Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.346922 5037 generic.go:334] "Generic (PLEG): container finished" podID="570f926c-8f52-4b77-a139-bfa9d3b61071" containerID="ef443edbc36ea241d41854ffc60714eddd7af29908d820af53dd96c0a93f1040" exitCode=0 Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.347241 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5sj7" event={"ID":"570f926c-8f52-4b77-a139-bfa9d3b61071","Type":"ContainerDied","Data":"ef443edbc36ea241d41854ffc60714eddd7af29908d820af53dd96c0a93f1040"} Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.357421 5037 generic.go:334] "Generic (PLEG): container finished" podID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" containerID="f56d1d8ed76a2d6938a10971022747efa0df312f78de4743d87c4b0c4012ceb7" exitCode=0 Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.357548 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np59v" event={"ID":"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8","Type":"ContainerDied","Data":"f56d1d8ed76a2d6938a10971022747efa0df312f78de4743d87c4b0c4012ceb7"} Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.357589 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np59v" event={"ID":"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8","Type":"ContainerStarted","Data":"41704a4c1a34935b2fb8791ff052d4d912aee0f299eb6e5f74bcde7071c958e5"} Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.361970 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" event={"ID":"da42804b-0fa3-43ee-9566-296c28b8052f","Type":"ContainerDied","Data":"716aa628b01c5b4c676b731f3649ee5809b6b977ec3249cda37fa4dfea60ab3b"} Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.362000 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f" Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.362020 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="716aa628b01c5b4c676b731f3649ee5809b6b977ec3249cda37fa4dfea60ab3b" Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.424526 5037 patch_prober.go:28] interesting pod/router-default-5444994796-gncr9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 14:18:07 crc kubenswrapper[5037]: [-]has-synced failed: reason withheld Nov 26 14:18:07 crc kubenswrapper[5037]: [+]process-running ok Nov 26 14:18:07 crc kubenswrapper[5037]: healthz check failed Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.424595 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gncr9" podUID="73f71d36-826a-4890-8f3f-6f1f3f159d5e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 14:18:07 crc kubenswrapper[5037]: I1126 14:18:07.721064 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 14:18:08 crc kubenswrapper[5037]: I1126 14:18:08.402980 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"aa1cfda0-0f53-494b-beb2-8ec2c81fa533","Type":"ContainerStarted","Data":"319ed2819a7d20d6d46767def732c32f3bbb6dff66784f8032828b47a53aadad"} Nov 26 14:18:08 crc kubenswrapper[5037]: I1126 14:18:08.428137 5037 patch_prober.go:28] interesting pod/router-default-5444994796-gncr9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 14:18:08 crc kubenswrapper[5037]: [-]has-synced failed: reason withheld Nov 26 14:18:08 crc kubenswrapper[5037]: [+]process-running ok Nov 26 14:18:08 crc kubenswrapper[5037]: healthz check failed Nov 26 14:18:08 crc kubenswrapper[5037]: I1126 14:18:08.428217 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gncr9" podUID="73f71d36-826a-4890-8f3f-6f1f3f159d5e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 14:18:09 crc kubenswrapper[5037]: I1126 14:18:09.435748 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:18:09 crc kubenswrapper[5037]: I1126 14:18:09.442260 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-gncr9" Nov 26 14:18:09 crc kubenswrapper[5037]: I1126 14:18:09.449449 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"aa1cfda0-0f53-494b-beb2-8ec2c81fa533","Type":"ContainerStarted","Data":"76fce539242de3521a4565eeefc63fbf6ef11a2e0bc957d2c801f1dd08ead22f"} Nov 26 14:18:09 crc kubenswrapper[5037]: I1126 14:18:09.475463 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.475439831 podStartE2EDuration="3.475439831s" podCreationTimestamp="2025-11-26 14:18:06 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:09.4718374 +0000 UTC m=+156.268607584" watchObservedRunningTime="2025-11-26 14:18:09.475439831 +0000 UTC m=+156.272210005" Nov 26 14:18:09 crc kubenswrapper[5037]: I1126 14:18:09.924903 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 14:18:09 crc kubenswrapper[5037]: I1126 14:18:09.925745 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 14:18:09 crc kubenswrapper[5037]: I1126 14:18:09.929604 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 14:18:09 crc kubenswrapper[5037]: I1126 14:18:09.929756 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 14:18:09 crc kubenswrapper[5037]: I1126 14:18:09.935239 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 14:18:10 crc kubenswrapper[5037]: I1126 14:18:10.048792 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0d896f7f-fe00-4729-89ef-1321f399a314-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0d896f7f-fe00-4729-89ef-1321f399a314\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 14:18:10 crc kubenswrapper[5037]: I1126 14:18:10.048920 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0d896f7f-fe00-4729-89ef-1321f399a314-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0d896f7f-fe00-4729-89ef-1321f399a314\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 14:18:10 crc kubenswrapper[5037]: I1126 14:18:10.150653 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0d896f7f-fe00-4729-89ef-1321f399a314-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0d896f7f-fe00-4729-89ef-1321f399a314\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 14:18:10 crc kubenswrapper[5037]: I1126 14:18:10.150843 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0d896f7f-fe00-4729-89ef-1321f399a314-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0d896f7f-fe00-4729-89ef-1321f399a314\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 14:18:10 crc kubenswrapper[5037]: I1126 14:18:10.151421 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0d896f7f-fe00-4729-89ef-1321f399a314-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0d896f7f-fe00-4729-89ef-1321f399a314\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 14:18:10 crc kubenswrapper[5037]: I1126 14:18:10.176697 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0d896f7f-fe00-4729-89ef-1321f399a314-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0d896f7f-fe00-4729-89ef-1321f399a314\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 14:18:10 crc 
kubenswrapper[5037]: I1126 14:18:10.302623 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 14:18:10 crc kubenswrapper[5037]: I1126 14:18:10.467001 5037 generic.go:334] "Generic (PLEG): container finished" podID="aa1cfda0-0f53-494b-beb2-8ec2c81fa533" containerID="76fce539242de3521a4565eeefc63fbf6ef11a2e0bc957d2c801f1dd08ead22f" exitCode=0 Nov 26 14:18:10 crc kubenswrapper[5037]: I1126 14:18:10.467437 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"aa1cfda0-0f53-494b-beb2-8ec2c81fa533","Type":"ContainerDied","Data":"76fce539242de3521a4565eeefc63fbf6ef11a2e0bc957d2c801f1dd08ead22f"} Nov 26 14:18:10 crc kubenswrapper[5037]: I1126 14:18:10.784542 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 14:18:11 crc kubenswrapper[5037]: I1126 14:18:11.214015 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-9hccb" Nov 26 14:18:11 crc kubenswrapper[5037]: I1126 14:18:11.257958 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:18:11 crc kubenswrapper[5037]: I1126 14:18:11.258041 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:18:11 crc kubenswrapper[5037]: I1126 14:18:11.502884 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0d896f7f-fe00-4729-89ef-1321f399a314","Type":"ContainerStarted","Data":"0f6d924ac1b82e9fe340aa42506b26a27bfcb63e890de3cbd8e4ca4ed064f1ee"} Nov 26 14:18:11 crc kubenswrapper[5037]: I1126 14:18:11.978621 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 14:18:12 crc kubenswrapper[5037]: I1126 14:18:12.083405 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aa1cfda0-0f53-494b-beb2-8ec2c81fa533-kubelet-dir\") pod \"aa1cfda0-0f53-494b-beb2-8ec2c81fa533\" (UID: \"aa1cfda0-0f53-494b-beb2-8ec2c81fa533\") " Nov 26 14:18:12 crc kubenswrapper[5037]: I1126 14:18:12.083460 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aa1cfda0-0f53-494b-beb2-8ec2c81fa533-kube-api-access\") pod \"aa1cfda0-0f53-494b-beb2-8ec2c81fa533\" (UID: \"aa1cfda0-0f53-494b-beb2-8ec2c81fa533\") " Nov 26 14:18:12 crc kubenswrapper[5037]: I1126 14:18:12.083514 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aa1cfda0-0f53-494b-beb2-8ec2c81fa533-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "aa1cfda0-0f53-494b-beb2-8ec2c81fa533" (UID: "aa1cfda0-0f53-494b-beb2-8ec2c81fa533"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:18:12 crc kubenswrapper[5037]: I1126 14:18:12.083706 5037 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aa1cfda0-0f53-494b-beb2-8ec2c81fa533-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 14:18:12 crc kubenswrapper[5037]: I1126 14:18:12.103371 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa1cfda0-0f53-494b-beb2-8ec2c81fa533-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "aa1cfda0-0f53-494b-beb2-8ec2c81fa533" (UID: "aa1cfda0-0f53-494b-beb2-8ec2c81fa533"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:18:12 crc kubenswrapper[5037]: I1126 14:18:12.185541 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aa1cfda0-0f53-494b-beb2-8ec2c81fa533-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 14:18:12 crc kubenswrapper[5037]: I1126 14:18:12.528410 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0d896f7f-fe00-4729-89ef-1321f399a314","Type":"ContainerStarted","Data":"0170e199f54242b6f700c95773fc64be1c45bb935f4866c54b695ccf4441718c"} Nov 26 14:18:12 crc kubenswrapper[5037]: I1126 14:18:12.541083 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"aa1cfda0-0f53-494b-beb2-8ec2c81fa533","Type":"ContainerDied","Data":"319ed2819a7d20d6d46767def732c32f3bbb6dff66784f8032828b47a53aadad"} Nov 26 14:18:12 crc kubenswrapper[5037]: I1126 14:18:12.541145 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="319ed2819a7d20d6d46767def732c32f3bbb6dff66784f8032828b47a53aadad" Nov 26 14:18:12 crc kubenswrapper[5037]: I1126 14:18:12.541238 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 14:18:12 crc kubenswrapper[5037]: I1126 14:18:12.548202 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.548173169 podStartE2EDuration="3.548173169s" podCreationTimestamp="2025-11-26 14:18:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:12.546691271 +0000 UTC m=+159.343461605" watchObservedRunningTime="2025-11-26 14:18:12.548173169 +0000 UTC m=+159.344943363" Nov 26 14:18:13 crc kubenswrapper[5037]: I1126 14:18:13.555933 5037 generic.go:334] "Generic (PLEG): container finished" podID="0d896f7f-fe00-4729-89ef-1321f399a314" containerID="0170e199f54242b6f700c95773fc64be1c45bb935f4866c54b695ccf4441718c" exitCode=0 Nov 26 14:18:13 crc kubenswrapper[5037]: I1126 14:18:13.556017 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0d896f7f-fe00-4729-89ef-1321f399a314","Type":"ContainerDied","Data":"0170e199f54242b6f700c95773fc64be1c45bb935f4866c54b695ccf4441718c"} Nov 26 14:18:15 crc kubenswrapper[5037]: I1126 14:18:15.934683 5037 patch_prober.go:28] interesting pod/downloads-7954f5f757-vwp8j container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 26 14:18:15 crc kubenswrapper[5037]: I1126 14:18:15.935173 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vwp8j" podUID="9dfb8a84-f022-4823-b563-5800b665b32f" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 26 14:18:15 crc kubenswrapper[5037]: I1126 14:18:15.934732 5037 patch_prober.go:28] interesting pod/downloads-7954f5f757-vwp8j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 26 14:18:15 crc kubenswrapper[5037]: I1126 14:18:15.935631 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vwp8j" podUID="9dfb8a84-f022-4823-b563-5800b665b32f" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 26 14:18:15 crc kubenswrapper[5037]: I1126 14:18:15.958829 5037 patch_prober.go:28] interesting pod/console-f9d7485db-qfdqh container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.9:8443/health\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Nov 26 14:18:15 crc kubenswrapper[5037]: I1126 14:18:15.958911 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-qfdqh" podUID="25030986-5796-4784-accd-c465c7c2daa3" containerName="console" probeResult="failure" output="Get \"https://10.217.0.9:8443/health\": dial tcp 10.217.0.9:8443: connect: connection refused" Nov 26 14:18:17 crc kubenswrapper[5037]: I1126 14:18:17.291119 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:18:17 crc kubenswrapper[5037]: I1126 14:18:17.299661 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b18a6f09-7a1e-4965-81e2-dde847147b41-metrics-certs\") pod \"network-metrics-daemon-wjch9\" (UID: \"b18a6f09-7a1e-4965-81e2-dde847147b41\") " pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:18:17 crc kubenswrapper[5037]: I1126 14:18:17.547731 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-wjch9" Nov 26 14:18:25 crc kubenswrapper[5037]: I1126 14:18:25.656140 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:18:25 crc kubenswrapper[5037]: I1126 14:18:25.958844 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-vwp8j" Nov 26 14:18:25 crc kubenswrapper[5037]: I1126 14:18:25.962647 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:18:25 crc kubenswrapper[5037]: I1126 14:18:25.965912 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-qfdqh" Nov 26 14:18:28 crc kubenswrapper[5037]: I1126 14:18:28.825812 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 14:18:28 crc kubenswrapper[5037]: I1126 14:18:28.901619 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0d896f7f-fe00-4729-89ef-1321f399a314-kube-api-access\") pod \"0d896f7f-fe00-4729-89ef-1321f399a314\" (UID: \"0d896f7f-fe00-4729-89ef-1321f399a314\") " Nov 26 14:18:28 crc kubenswrapper[5037]: I1126 14:18:28.901723 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0d896f7f-fe00-4729-89ef-1321f399a314-kubelet-dir\") pod \"0d896f7f-fe00-4729-89ef-1321f399a314\" (UID: \"0d896f7f-fe00-4729-89ef-1321f399a314\") " Nov 26 14:18:28 crc kubenswrapper[5037]: I1126 14:18:28.901915 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d896f7f-fe00-4729-89ef-1321f399a314-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "0d896f7f-fe00-4729-89ef-1321f399a314" (UID: "0d896f7f-fe00-4729-89ef-1321f399a314"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:18:28 crc kubenswrapper[5037]: I1126 14:18:28.918523 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d896f7f-fe00-4729-89ef-1321f399a314-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0d896f7f-fe00-4729-89ef-1321f399a314" (UID: "0d896f7f-fe00-4729-89ef-1321f399a314"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:18:29 crc kubenswrapper[5037]: I1126 14:18:29.003453 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0d896f7f-fe00-4729-89ef-1321f399a314-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 14:18:29 crc kubenswrapper[5037]: I1126 14:18:29.003501 5037 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0d896f7f-fe00-4729-89ef-1321f399a314-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 14:18:29 crc kubenswrapper[5037]: I1126 14:18:29.707414 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0d896f7f-fe00-4729-89ef-1321f399a314","Type":"ContainerDied","Data":"0f6d924ac1b82e9fe340aa42506b26a27bfcb63e890de3cbd8e4ca4ed064f1ee"} Nov 26 14:18:29 crc kubenswrapper[5037]: I1126 14:18:29.707457 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 14:18:29 crc kubenswrapper[5037]: I1126 14:18:29.707466 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f6d924ac1b82e9fe340aa42506b26a27bfcb63e890de3cbd8e4ca4ed064f1ee" Nov 26 14:18:36 crc kubenswrapper[5037]: I1126 14:18:36.417382 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jm5zr" Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.047050 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.047943 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s8qwc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
redhat-operators-b5sj7_openshift-marketplace(570f926c-8f52-4b77-a139-bfa9d3b61071): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.049447 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-b5sj7" podUID="570f926c-8f52-4b77-a139-bfa9d3b61071" Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.103776 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.104034 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jxksz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-msn7s_openshift-marketplace(e33b5e20-dd02-4850-b59b-40a271de1b3f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.105137 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-msn7s" podUID="e33b5e20-dd02-4850-b59b-40a271de1b3f" Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.108830 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" 
image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.108997 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nmgjr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-m8gz7_openshift-marketplace(f379a727-1bc7-469d-8148-b7fb1abb5155): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.110265 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-m8gz7" podUID="f379a727-1bc7-469d-8148-b7fb1abb5155" Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.138471 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.138678 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dcdvd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-np59v_openshift-marketplace(a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.143167 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-np59v" podUID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8"
Nov 26 14:18:41 crc kubenswrapper[5037]: I1126 14:18:41.247230 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:18:41 crc kubenswrapper[5037]: I1126 14:18:41.247652 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 14:18:41 crc kubenswrapper[5037]: I1126 14:18:41.455838 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-wjch9"]
Nov 26 14:18:41 crc kubenswrapper[5037]: W1126 14:18:41.469156 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb18a6f09_7a1e_4965_81e2_dde847147b41.slice/crio-7975532cfac18104afbd0af85ef4600f785464bbdf0e2a3005951d837492a79c WatchSource:0}: Error finding container 7975532cfac18104afbd0af85ef4600f785464bbdf0e2a3005951d837492a79c: Status 404 returned error can't find the container with id 7975532cfac18104afbd0af85ef4600f785464bbdf0e2a3005951d837492a79c
Nov 26 14:18:41 crc kubenswrapper[5037]: I1126 14:18:41.783408 5037 generic.go:334] "Generic (PLEG): container finished" podID="bf9611ba-47f1-43da-92fc-a4f99606500a" containerID="2ee127915b93a0f0611a2bc7f09947588c410281c1834e927ec7155c99d46fbe" exitCode=0
Nov 26 14:18:41 crc kubenswrapper[5037]: I1126 14:18:41.783868 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wb4bw" event={"ID":"bf9611ba-47f1-43da-92fc-a4f99606500a","Type":"ContainerDied","Data":"2ee127915b93a0f0611a2bc7f09947588c410281c1834e927ec7155c99d46fbe"}
Nov 26 14:18:41 crc kubenswrapper[5037]: I1126 14:18:41.792633 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-wjch9" event={"ID":"b18a6f09-7a1e-4965-81e2-dde847147b41","Type":"ContainerStarted","Data":"7975532cfac18104afbd0af85ef4600f785464bbdf0e2a3005951d837492a79c"}
Nov 26 14:18:41 crc kubenswrapper[5037]: I1126 14:18:41.795927 5037 generic.go:334] "Generic (PLEG): container finished" podID="c6c8b303-75aa-4a87-a45c-fd1776689864" containerID="e7261a8e44f033926aa01fd9757d1db51f1dc4840dc7ec5bfd28ce43ffb0ce38" exitCode=0
Nov 26 14:18:41 crc kubenswrapper[5037]: I1126 14:18:41.795999 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hjjdh" event={"ID":"c6c8b303-75aa-4a87-a45c-fd1776689864","Type":"ContainerDied","Data":"e7261a8e44f033926aa01fd9757d1db51f1dc4840dc7ec5bfd28ce43ffb0ce38"}
Nov 26 14:18:41 crc kubenswrapper[5037]: I1126 14:18:41.802227 5037 generic.go:334] "Generic (PLEG): container finished" podID="707b088e-aa31-4988-8677-6dcac9117725" containerID="44c646debec275dd1535e88181fdb5fd2d641c5658d457750b556e66e0e49f9c" exitCode=0
Nov 26 14:18:41 crc kubenswrapper[5037]: I1126 14:18:41.802306 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wsj5x" event={"ID":"707b088e-aa31-4988-8677-6dcac9117725","Type":"ContainerDied","Data":"44c646debec275dd1535e88181fdb5fd2d641c5658d457750b556e66e0e49f9c"}
Nov 26 14:18:41 crc kubenswrapper[5037]: I1126 14:18:41.809246 5037 generic.go:334] "Generic (PLEG): container finished" podID="354f57d7-61fc-44f1-ab03-d7bab0a6a984" containerID="ad0706ec85c3cff29c39bb87403013a873a8d30acd860478064a082b633b509f" exitCode=0
Nov 26 14:18:41 crc kubenswrapper[5037]: I1126 14:18:41.811834 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-smcps" event={"ID":"354f57d7-61fc-44f1-ab03-d7bab0a6a984","Type":"ContainerDied","Data":"ad0706ec85c3cff29c39bb87403013a873a8d30acd860478064a082b633b509f"}
Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.814529 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-np59v" podUID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8"
Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.817890 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-m8gz7" podUID="f379a727-1bc7-469d-8148-b7fb1abb5155"
Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.817963 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-msn7s" podUID="e33b5e20-dd02-4850-b59b-40a271de1b3f"
Nov 26 14:18:41 crc kubenswrapper[5037]: E1126 14:18:41.818026 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-b5sj7" podUID="570f926c-8f52-4b77-a139-bfa9d3b61071"
Nov 26 14:18:41 crc kubenswrapper[5037]: I1126 14:18:41.961672 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 14:18:42 crc kubenswrapper[5037]: I1126 14:18:42.830661 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-wjch9" event={"ID":"b18a6f09-7a1e-4965-81e2-dde847147b41","Type":"ContainerStarted","Data":"096d97b3437581d0b3cc84d60930471f6f47618d5cc7c7e9887c995df2344aff"}
Nov 26 14:18:42 crc kubenswrapper[5037]: I1126 14:18:42.831079 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-wjch9" event={"ID":"b18a6f09-7a1e-4965-81e2-dde847147b41","Type":"ContainerStarted","Data":"7e85ada8b1c841cc0314de7fa11f8a794a26749fb385d5809bc227c3a991a6cd"}
Nov 26 14:18:42 crc kubenswrapper[5037]: I1126 14:18:42.833447 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hjjdh" event={"ID":"c6c8b303-75aa-4a87-a45c-fd1776689864","Type":"ContainerStarted","Data":"6d37f75349d262f6794d56fbc104efd0c4ce76751dce93e5880129545baeca7f"}
Nov 26 14:18:42 crc kubenswrapper[5037]: I1126 14:18:42.854855 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-wjch9" podStartSLOduration=167.854829322 podStartE2EDuration="2m47.854829322s" podCreationTimestamp="2025-11-26 14:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:18:42.848106701 +0000 UTC m=+189.644876885" watchObservedRunningTime="2025-11-26 14:18:42.854829322 +0000 UTC m=+189.651599506"
Nov 26 14:18:42 crc kubenswrapper[5037]: I1126 14:18:42.978603 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hjjdh"
Nov 26 14:18:42 crc kubenswrapper[5037]: I1126 14:18:42.978668 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hjjdh"
Nov 26 14:18:43 crc kubenswrapper[5037]: I1126 14:18:43.842163 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wsj5x" event={"ID":"707b088e-aa31-4988-8677-6dcac9117725","Type":"ContainerStarted","Data":"4bd3ebdda6ad70a778389275491727028c0afdaad9fbafe37198005afdc4b2da"}
Nov 26 14:18:43 crc kubenswrapper[5037]: I1126 14:18:43.846562 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-smcps" event={"ID":"354f57d7-61fc-44f1-ab03-d7bab0a6a984","Type":"ContainerStarted","Data":"cd1c861f1e1e28fcf918abe996f388c27b16fab64e20057f814b2be581c9f56e"}
Nov 26 14:18:43 crc kubenswrapper[5037]: I1126 14:18:43.851498 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wb4bw" event={"ID":"bf9611ba-47f1-43da-92fc-a4f99606500a","Type":"ContainerStarted","Data":"9e6a7138f86d07f6ec2848f6a1aa2aa1dcb7cb0e37fa067e77f20d96e0f16a19"}
Nov 26 14:18:43 crc kubenswrapper[5037]: I1126 14:18:43.870679 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hjjdh" podStartSLOduration=4.667272625 podStartE2EDuration="41.87065214s" podCreationTimestamp="2025-11-26 14:18:02 +0000 UTC" firstStartedPulling="2025-11-26 14:18:05.0914972 +0000 UTC m=+151.888267384" lastFinishedPulling="2025-11-26 14:18:42.294876715 +0000 UTC m=+189.091646899" observedRunningTime="2025-11-26 14:18:42.880402861 +0000 UTC m=+189.677173065" watchObservedRunningTime="2025-11-26 14:18:43.87065214 +0000 UTC m=+190.667422344"
Nov 26 14:18:43 crc kubenswrapper[5037]: I1126 14:18:43.877473 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wsj5x" podStartSLOduration=3.420392309 podStartE2EDuration="39.877450443s" podCreationTimestamp="2025-11-26 14:18:04 +0000 UTC" firstStartedPulling="2025-11-26 14:18:06.265863656 +0000 UTC m=+153.062633840" lastFinishedPulling="2025-11-26 14:18:42.72292179 +0000 UTC m=+189.519691974" observedRunningTime="2025-11-26 14:18:43.868811204 +0000 UTC m=+190.665581398" watchObservedRunningTime="2025-11-26 14:18:43.877450443 +0000 UTC m=+190.674220637"
Nov 26 14:18:43 crc kubenswrapper[5037]: I1126 14:18:43.898678 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-smcps" podStartSLOduration=4.292605895 podStartE2EDuration="41.898663082s" podCreationTimestamp="2025-11-26 14:18:02 +0000 UTC" firstStartedPulling="2025-11-26 14:18:05.148125508 +0000 UTC m=+151.944895692" lastFinishedPulling="2025-11-26 14:18:42.754182695 +0000 UTC m=+189.550952879" observedRunningTime="2025-11-26 14:18:43.895726687 +0000 UTC m=+190.692496871" watchObservedRunningTime="2025-11-26 14:18:43.898663082 +0000 UTC m=+190.695433266"
Nov 26 14:18:43 crc kubenswrapper[5037]: I1126 14:18:43.916524 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wb4bw" podStartSLOduration=4.355591606 podStartE2EDuration="41.916504945s" podCreationTimestamp="2025-11-26 14:18:02 +0000 UTC" firstStartedPulling="2025-11-26 14:18:05.247056012 +0000 UTC m=+152.043826206" lastFinishedPulling="2025-11-26 14:18:42.807969361 +0000 UTC m=+189.604739545" observedRunningTime="2025-11-26 14:18:43.916040324 +0000 UTC m=+190.712810508" watchObservedRunningTime="2025-11-26 14:18:43.916504945 +0000 UTC m=+190.713275129"
Nov 26 14:18:44 crc kubenswrapper[5037]: I1126 14:18:44.096203 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-hjjdh" podUID="c6c8b303-75aa-4a87-a45c-fd1776689864" containerName="registry-server" probeResult="failure" output=<
Nov 26 14:18:44 crc kubenswrapper[5037]: timeout: failed to connect service ":50051" within 1s
Nov 26 14:18:44 crc kubenswrapper[5037]: >
Nov 26 14:18:44 crc kubenswrapper[5037]: I1126 14:18:44.936525 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:44 crc kubenswrapper[5037]: I1126 14:18:44.936610 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:44 crc kubenswrapper[5037]: I1126 14:18:44.987972 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:52 crc kubenswrapper[5037]: I1126 14:18:52.643085 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wb4bw"
Nov 26 14:18:52 crc kubenswrapper[5037]: I1126 14:18:52.643778 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wb4bw"
Nov 26 14:18:52 crc kubenswrapper[5037]: I1126 14:18:52.715690 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wb4bw"
Nov 26 14:18:52 crc kubenswrapper[5037]: I1126 14:18:52.957401 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wb4bw"
Nov 26 14:18:53 crc kubenswrapper[5037]: I1126 14:18:53.046195 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hjjdh"
Nov 26 14:18:53 crc kubenswrapper[5037]: I1126 14:18:53.091224 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hjjdh"
Nov 26 14:18:53 crc kubenswrapper[5037]: I1126 14:18:53.276546 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-smcps"
Nov 26 14:18:53 crc kubenswrapper[5037]: I1126 14:18:53.276631 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-smcps"
Nov 26 14:18:53 crc kubenswrapper[5037]: I1126 14:18:53.316835 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-smcps"
Nov 26 14:18:53 crc kubenswrapper[5037]: I1126 14:18:53.963932 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-smcps"
Nov 26 14:18:54 crc kubenswrapper[5037]: I1126 14:18:54.549984 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hjjdh"]
Nov 26 14:18:54 crc kubenswrapper[5037]: I1126 14:18:54.922985 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hjjdh" podUID="c6c8b303-75aa-4a87-a45c-fd1776689864" containerName="registry-server" containerID="cri-o://6d37f75349d262f6794d56fbc104efd0c4ce76751dce93e5880129545baeca7f" gracePeriod=2
Nov 26 14:18:54 crc kubenswrapper[5037]: I1126 14:18:54.977557 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:55 crc kubenswrapper[5037]: I1126 14:18:55.549718 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-smcps"]
Nov 26 14:18:55 crc kubenswrapper[5037]: I1126 14:18:55.929949 5037 generic.go:334] "Generic (PLEG): container finished" podID="c6c8b303-75aa-4a87-a45c-fd1776689864" containerID="6d37f75349d262f6794d56fbc104efd0c4ce76751dce93e5880129545baeca7f" exitCode=0
Nov 26 14:18:55 crc kubenswrapper[5037]: I1126 14:18:55.930103 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hjjdh" event={"ID":"c6c8b303-75aa-4a87-a45c-fd1776689864","Type":"ContainerDied","Data":"6d37f75349d262f6794d56fbc104efd0c4ce76751dce93e5880129545baeca7f"}
Nov 26 14:18:55 crc kubenswrapper[5037]: I1126 14:18:55.932795 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m8gz7" event={"ID":"f379a727-1bc7-469d-8148-b7fb1abb5155","Type":"ContainerStarted","Data":"b3d8f520dd0136bed9b91717b97b86d5a4876731097abf9bd309601c7677a6d2"}
Nov 26 14:18:55 crc kubenswrapper[5037]: I1126 14:18:55.936562 5037 generic.go:334] "Generic (PLEG): container finished" podID="e33b5e20-dd02-4850-b59b-40a271de1b3f" containerID="b48c5942d91a4b3d557b6fed32103f26b65b4fccbbc64287c524d93c06a0182f" exitCode=0
Nov 26 14:18:55 crc kubenswrapper[5037]: I1126 14:18:55.936880 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-smcps" podUID="354f57d7-61fc-44f1-ab03-d7bab0a6a984" containerName="registry-server" containerID="cri-o://cd1c861f1e1e28fcf918abe996f388c27b16fab64e20057f814b2be581c9f56e" gracePeriod=2
Nov 26 14:18:55 crc kubenswrapper[5037]: I1126 14:18:55.936967 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msn7s" event={"ID":"e33b5e20-dd02-4850-b59b-40a271de1b3f","Type":"ContainerDied","Data":"b48c5942d91a4b3d557b6fed32103f26b65b4fccbbc64287c524d93c06a0182f"}
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.184897 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hjjdh"
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.286518 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-smcps"
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.312853 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2f5wh\" (UniqueName: \"kubernetes.io/projected/c6c8b303-75aa-4a87-a45c-fd1776689864-kube-api-access-2f5wh\") pod \"c6c8b303-75aa-4a87-a45c-fd1776689864\" (UID: \"c6c8b303-75aa-4a87-a45c-fd1776689864\") "
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.312961 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6c8b303-75aa-4a87-a45c-fd1776689864-utilities\") pod \"c6c8b303-75aa-4a87-a45c-fd1776689864\" (UID: \"c6c8b303-75aa-4a87-a45c-fd1776689864\") "
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.313083 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6c8b303-75aa-4a87-a45c-fd1776689864-catalog-content\") pod \"c6c8b303-75aa-4a87-a45c-fd1776689864\" (UID: \"c6c8b303-75aa-4a87-a45c-fd1776689864\") "
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.314075 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6c8b303-75aa-4a87-a45c-fd1776689864-utilities" (OuterVolumeSpecName: "utilities") pod "c6c8b303-75aa-4a87-a45c-fd1776689864" (UID: "c6c8b303-75aa-4a87-a45c-fd1776689864"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.321795 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6c8b303-75aa-4a87-a45c-fd1776689864-kube-api-access-2f5wh" (OuterVolumeSpecName: "kube-api-access-2f5wh") pod "c6c8b303-75aa-4a87-a45c-fd1776689864" (UID: "c6c8b303-75aa-4a87-a45c-fd1776689864"). InnerVolumeSpecName "kube-api-access-2f5wh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.358587 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6c8b303-75aa-4a87-a45c-fd1776689864-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c6c8b303-75aa-4a87-a45c-fd1776689864" (UID: "c6c8b303-75aa-4a87-a45c-fd1776689864"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.414538 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtk8x\" (UniqueName: \"kubernetes.io/projected/354f57d7-61fc-44f1-ab03-d7bab0a6a984-kube-api-access-gtk8x\") pod \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\" (UID: \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\") "
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.414957 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/354f57d7-61fc-44f1-ab03-d7bab0a6a984-catalog-content\") pod \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\" (UID: \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\") "
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.415100 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/354f57d7-61fc-44f1-ab03-d7bab0a6a984-utilities\") pod \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\" (UID: \"354f57d7-61fc-44f1-ab03-d7bab0a6a984\") "
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.415523 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6c8b303-75aa-4a87-a45c-fd1776689864-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.415674 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6c8b303-75aa-4a87-a45c-fd1776689864-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.415741 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2f5wh\" (UniqueName: \"kubernetes.io/projected/c6c8b303-75aa-4a87-a45c-fd1776689864-kube-api-access-2f5wh\") on node \"crc\" DevicePath \"\""
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.415887 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/354f57d7-61fc-44f1-ab03-d7bab0a6a984-utilities" (OuterVolumeSpecName: "utilities") pod "354f57d7-61fc-44f1-ab03-d7bab0a6a984" (UID: "354f57d7-61fc-44f1-ab03-d7bab0a6a984"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.417612 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/354f57d7-61fc-44f1-ab03-d7bab0a6a984-kube-api-access-gtk8x" (OuterVolumeSpecName: "kube-api-access-gtk8x") pod "354f57d7-61fc-44f1-ab03-d7bab0a6a984" (UID: "354f57d7-61fc-44f1-ab03-d7bab0a6a984"). InnerVolumeSpecName "kube-api-access-gtk8x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.465220 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/354f57d7-61fc-44f1-ab03-d7bab0a6a984-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "354f57d7-61fc-44f1-ab03-d7bab0a6a984" (UID: "354f57d7-61fc-44f1-ab03-d7bab0a6a984"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.517421 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtk8x\" (UniqueName: \"kubernetes.io/projected/354f57d7-61fc-44f1-ab03-d7bab0a6a984-kube-api-access-gtk8x\") on node \"crc\" DevicePath \"\""
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.517471 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/354f57d7-61fc-44f1-ab03-d7bab0a6a984-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.517483 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/354f57d7-61fc-44f1-ab03-d7bab0a6a984-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.947011 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wsj5x"]
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.947250 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wsj5x" podUID="707b088e-aa31-4988-8677-6dcac9117725" containerName="registry-server" containerID="cri-o://4bd3ebdda6ad70a778389275491727028c0afdaad9fbafe37198005afdc4b2da" gracePeriod=2
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.951897 5037 generic.go:334] "Generic (PLEG): container finished" podID="f379a727-1bc7-469d-8148-b7fb1abb5155" containerID="b3d8f520dd0136bed9b91717b97b86d5a4876731097abf9bd309601c7677a6d2" exitCode=0
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.952107 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m8gz7" event={"ID":"f379a727-1bc7-469d-8148-b7fb1abb5155","Type":"ContainerDied","Data":"b3d8f520dd0136bed9b91717b97b86d5a4876731097abf9bd309601c7677a6d2"}
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.963440 5037 generic.go:334] "Generic (PLEG): container finished" podID="354f57d7-61fc-44f1-ab03-d7bab0a6a984" containerID="cd1c861f1e1e28fcf918abe996f388c27b16fab64e20057f814b2be581c9f56e" exitCode=0
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.963523 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-smcps"
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.963518 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-smcps" event={"ID":"354f57d7-61fc-44f1-ab03-d7bab0a6a984","Type":"ContainerDied","Data":"cd1c861f1e1e28fcf918abe996f388c27b16fab64e20057f814b2be581c9f56e"}
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.964181 5037 scope.go:117] "RemoveContainer" containerID="cd1c861f1e1e28fcf918abe996f388c27b16fab64e20057f814b2be581c9f56e"
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.964052 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-smcps" event={"ID":"354f57d7-61fc-44f1-ab03-d7bab0a6a984","Type":"ContainerDied","Data":"a08713f1aee66f4b925ad035829039ea516e9f93d92c2a3cc2455b684b61129d"}
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.966346 5037 generic.go:334] "Generic (PLEG): container finished" podID="570f926c-8f52-4b77-a139-bfa9d3b61071" containerID="8275addaa95be45de966e97d7fe694e8548a52e013963a612ac6ce2643360927" exitCode=0
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.966442 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5sj7" event={"ID":"570f926c-8f52-4b77-a139-bfa9d3b61071","Type":"ContainerDied","Data":"8275addaa95be45de966e97d7fe694e8548a52e013963a612ac6ce2643360927"}
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.982835 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msn7s" event={"ID":"e33b5e20-dd02-4850-b59b-40a271de1b3f","Type":"ContainerStarted","Data":"1af61d3729cdc507cd1d56e096b3b50561f5ebbeefa85d78043fda571e830c47"}
Nov 26 14:18:56 crc kubenswrapper[5037]: I1126 14:18:56.992571 5037 scope.go:117] "RemoveContainer" containerID="ad0706ec85c3cff29c39bb87403013a873a8d30acd860478064a082b633b509f"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.001203 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hjjdh" event={"ID":"c6c8b303-75aa-4a87-a45c-fd1776689864","Type":"ContainerDied","Data":"09e9589fa94c0e614de6d2520f494f10e74f6906029e9fc8d452556d290e84f1"}
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.001469 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hjjdh"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.037046 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-msn7s" podStartSLOduration=2.725458874 podStartE2EDuration="53.037023904s" podCreationTimestamp="2025-11-26 14:18:04 +0000 UTC" firstStartedPulling="2025-11-26 14:18:06.257832563 +0000 UTC m=+153.054602747" lastFinishedPulling="2025-11-26 14:18:56.569397593 +0000 UTC m=+203.366167777" observedRunningTime="2025-11-26 14:18:57.032090358 +0000 UTC m=+203.828860542" watchObservedRunningTime="2025-11-26 14:18:57.037023904 +0000 UTC m=+203.833794088"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.081307 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-smcps"]
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.087957 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-smcps"]
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.094650 5037 scope.go:117] "RemoveContainer" containerID="9be2f0a076ae619178c392e44b984208322a662769d0bd37f8c81cbc2ca07572"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.100774 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hjjdh"]
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.104009 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hjjdh"]
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.120138 5037 scope.go:117] "RemoveContainer" containerID="cd1c861f1e1e28fcf918abe996f388c27b16fab64e20057f814b2be581c9f56e"
Nov 26 14:18:57 crc kubenswrapper[5037]: E1126 14:18:57.120945 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd1c861f1e1e28fcf918abe996f388c27b16fab64e20057f814b2be581c9f56e\": container with ID starting with cd1c861f1e1e28fcf918abe996f388c27b16fab64e20057f814b2be581c9f56e not found: ID does not exist" containerID="cd1c861f1e1e28fcf918abe996f388c27b16fab64e20057f814b2be581c9f56e"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.120986 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd1c861f1e1e28fcf918abe996f388c27b16fab64e20057f814b2be581c9f56e"} err="failed to get container status \"cd1c861f1e1e28fcf918abe996f388c27b16fab64e20057f814b2be581c9f56e\": rpc error: code = NotFound desc = could not find container \"cd1c861f1e1e28fcf918abe996f388c27b16fab64e20057f814b2be581c9f56e\": container with ID starting with cd1c861f1e1e28fcf918abe996f388c27b16fab64e20057f814b2be581c9f56e not found: ID does not exist"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.121058 5037 scope.go:117] "RemoveContainer" containerID="ad0706ec85c3cff29c39bb87403013a873a8d30acd860478064a082b633b509f"
Nov 26 14:18:57 crc kubenswrapper[5037]: E1126 14:18:57.122046 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad0706ec85c3cff29c39bb87403013a873a8d30acd860478064a082b633b509f\": container with ID starting with ad0706ec85c3cff29c39bb87403013a873a8d30acd860478064a082b633b509f not found: ID does not exist" containerID="ad0706ec85c3cff29c39bb87403013a873a8d30acd860478064a082b633b509f"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.122077 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad0706ec85c3cff29c39bb87403013a873a8d30acd860478064a082b633b509f"} err="failed to get container status \"ad0706ec85c3cff29c39bb87403013a873a8d30acd860478064a082b633b509f\": rpc error: code = NotFound desc = could not find container \"ad0706ec85c3cff29c39bb87403013a873a8d30acd860478064a082b633b509f\": container with ID starting with ad0706ec85c3cff29c39bb87403013a873a8d30acd860478064a082b633b509f not found: ID does not exist"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.122095 5037 scope.go:117] "RemoveContainer" containerID="9be2f0a076ae619178c392e44b984208322a662769d0bd37f8c81cbc2ca07572"
Nov 26 14:18:57 crc kubenswrapper[5037]: E1126 14:18:57.122457 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9be2f0a076ae619178c392e44b984208322a662769d0bd37f8c81cbc2ca07572\": container with ID starting with 9be2f0a076ae619178c392e44b984208322a662769d0bd37f8c81cbc2ca07572 not found: ID does not exist" containerID="9be2f0a076ae619178c392e44b984208322a662769d0bd37f8c81cbc2ca07572"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.122526 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9be2f0a076ae619178c392e44b984208322a662769d0bd37f8c81cbc2ca07572"} err="failed to get container status \"9be2f0a076ae619178c392e44b984208322a662769d0bd37f8c81cbc2ca07572\": rpc error: code = NotFound desc = could not find container \"9be2f0a076ae619178c392e44b984208322a662769d0bd37f8c81cbc2ca07572\": container with ID starting with 9be2f0a076ae619178c392e44b984208322a662769d0bd37f8c81cbc2ca07572 not found: ID does not exist"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.122546 5037 scope.go:117] "RemoveContainer" containerID="6d37f75349d262f6794d56fbc104efd0c4ce76751dce93e5880129545baeca7f"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.143641 5037 scope.go:117] "RemoveContainer" containerID="e7261a8e44f033926aa01fd9757d1db51f1dc4840dc7ec5bfd28ce43ffb0ce38"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.180144 5037 scope.go:117] "RemoveContainer" containerID="23f84bf55da11b202e24899abf62e8650c151a7464afe07163b25f61edf5d5d8"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.327382 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.428428 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/707b088e-aa31-4988-8677-6dcac9117725-catalog-content\") pod \"707b088e-aa31-4988-8677-6dcac9117725\" (UID: \"707b088e-aa31-4988-8677-6dcac9117725\") "
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.428477 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/707b088e-aa31-4988-8677-6dcac9117725-utilities\") pod \"707b088e-aa31-4988-8677-6dcac9117725\" (UID: \"707b088e-aa31-4988-8677-6dcac9117725\") "
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.428562 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fb4kz\" (UniqueName: \"kubernetes.io/projected/707b088e-aa31-4988-8677-6dcac9117725-kube-api-access-fb4kz\") pod \"707b088e-aa31-4988-8677-6dcac9117725\" (UID: \"707b088e-aa31-4988-8677-6dcac9117725\") "
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.430681 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/707b088e-aa31-4988-8677-6dcac9117725-utilities" (OuterVolumeSpecName: "utilities") pod "707b088e-aa31-4988-8677-6dcac9117725" (UID: "707b088e-aa31-4988-8677-6dcac9117725"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.435049 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/707b088e-aa31-4988-8677-6dcac9117725-kube-api-access-fb4kz" (OuterVolumeSpecName: "kube-api-access-fb4kz") pod "707b088e-aa31-4988-8677-6dcac9117725" (UID: "707b088e-aa31-4988-8677-6dcac9117725"). InnerVolumeSpecName "kube-api-access-fb4kz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.453253 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/707b088e-aa31-4988-8677-6dcac9117725-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "707b088e-aa31-4988-8677-6dcac9117725" (UID: "707b088e-aa31-4988-8677-6dcac9117725"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.529799 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/707b088e-aa31-4988-8677-6dcac9117725-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.530040 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/707b088e-aa31-4988-8677-6dcac9117725-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.530103 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fb4kz\" (UniqueName: \"kubernetes.io/projected/707b088e-aa31-4988-8677-6dcac9117725-kube-api-access-fb4kz\") on node \"crc\" DevicePath \"\""
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.916931 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="354f57d7-61fc-44f1-ab03-d7bab0a6a984" path="/var/lib/kubelet/pods/354f57d7-61fc-44f1-ab03-d7bab0a6a984/volumes"
Nov 26 14:18:57 crc kubenswrapper[5037]: I1126 14:18:57.918056 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6c8b303-75aa-4a87-a45c-fd1776689864" path="/var/lib/kubelet/pods/c6c8b303-75aa-4a87-a45c-fd1776689864/volumes"
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.011105 5037 generic.go:334] "Generic (PLEG): container finished" podID="707b088e-aa31-4988-8677-6dcac9117725" containerID="4bd3ebdda6ad70a778389275491727028c0afdaad9fbafe37198005afdc4b2da" exitCode=0
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.011171 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wsj5x"
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.011162 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wsj5x" event={"ID":"707b088e-aa31-4988-8677-6dcac9117725","Type":"ContainerDied","Data":"4bd3ebdda6ad70a778389275491727028c0afdaad9fbafe37198005afdc4b2da"}
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.011632 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wsj5x" event={"ID":"707b088e-aa31-4988-8677-6dcac9117725","Type":"ContainerDied","Data":"7ae9e78d00273155d3e7bece699f60eb47e1432d17cfb6ad0ede43405ddb085e"}
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.011656 5037 scope.go:117] "RemoveContainer" containerID="4bd3ebdda6ad70a778389275491727028c0afdaad9fbafe37198005afdc4b2da"
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.015983 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m8gz7" event={"ID":"f379a727-1bc7-469d-8148-b7fb1abb5155","Type":"ContainerStarted","Data":"25ac19827c3815eae23d456da47c5d846d63b191e05808a65c074afa3f4984b0"}
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.020966 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5sj7" event={"ID":"570f926c-8f52-4b77-a139-bfa9d3b61071","Type":"ContainerStarted","Data":"b8db39bb5ad4b54572b8df0d1e0361e5c360a34a7b38e8aafcd252f318944683"}
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.025817 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np59v" event={"ID":"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8","Type":"ContainerStarted","Data":"48eaea061f738b3b8ba79fb9d0f1cdcb3376a1de5e8b59d2cbe4dd7ac72c4bd5"}
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.032771 5037 scope.go:117] "RemoveContainer" containerID="44c646debec275dd1535e88181fdb5fd2d641c5658d457750b556e66e0e49f9c"
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.034100 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wsj5x"]
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.047655 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wsj5x"]
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.052435 5037 scope.go:117] "RemoveContainer" containerID="beda4050355ce07612aa16319836730cd34f5144da51fd3a490c0809545b5431"
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.058528 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-m8gz7" podStartSLOduration=3.77630248 podStartE2EDuration="56.058505627s" podCreationTimestamp="2025-11-26 14:18:02 +0000 UTC" firstStartedPulling="2025-11-26 14:18:05.103042613 +0000 UTC m=+151.899812797" lastFinishedPulling="2025-11-26 14:18:57.38524577 +0000 UTC m=+204.182015944" observedRunningTime="2025-11-26 14:18:58.054607238 +0000 UTC m=+204.851377422" watchObservedRunningTime="2025-11-26 14:18:58.058505627 +0000 UTC m=+204.855275801"
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.071677 5037 scope.go:117] "RemoveContainer" containerID="4bd3ebdda6ad70a778389275491727028c0afdaad9fbafe37198005afdc4b2da"
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.074623 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b5sj7" podStartSLOduration=2.890978705 podStartE2EDuration="53.074589746s" podCreationTimestamp="2025-11-26 14:18:05 +0000 UTC" firstStartedPulling="2025-11-26 14:18:07.35194761 +0000 UTC m=+154.148717794" lastFinishedPulling="2025-11-26 14:18:57.535558651 +0000 UTC m=+204.332328835" observedRunningTime="2025-11-26 14:18:58.072258917 +0000 UTC m=+204.869029111" watchObservedRunningTime="2025-11-26 14:18:58.074589746 +0000 UTC m=+204.871359930"
Nov 26 14:18:58 crc kubenswrapper[5037]: E1126 14:18:58.075420 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bd3ebdda6ad70a778389275491727028c0afdaad9fbafe37198005afdc4b2da\": container with ID starting with 4bd3ebdda6ad70a778389275491727028c0afdaad9fbafe37198005afdc4b2da not found: ID does not exist" containerID="4bd3ebdda6ad70a778389275491727028c0afdaad9fbafe37198005afdc4b2da"
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.075472 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bd3ebdda6ad70a778389275491727028c0afdaad9fbafe37198005afdc4b2da"} err="failed to get container status \"4bd3ebdda6ad70a778389275491727028c0afdaad9fbafe37198005afdc4b2da\": rpc error: code = NotFound desc = could not find container \"4bd3ebdda6ad70a778389275491727028c0afdaad9fbafe37198005afdc4b2da\": container with ID starting with 4bd3ebdda6ad70a778389275491727028c0afdaad9fbafe37198005afdc4b2da not found: ID does not exist"
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.075510 5037 scope.go:117] "RemoveContainer" containerID="44c646debec275dd1535e88181fdb5fd2d641c5658d457750b556e66e0e49f9c"
Nov 26 14:18:58 crc kubenswrapper[5037]: E1126 14:18:58.075983 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44c646debec275dd1535e88181fdb5fd2d641c5658d457750b556e66e0e49f9c\": container with ID starting with 44c646debec275dd1535e88181fdb5fd2d641c5658d457750b556e66e0e49f9c not found: ID does not exist" containerID="44c646debec275dd1535e88181fdb5fd2d641c5658d457750b556e66e0e49f9c"
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.076040 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44c646debec275dd1535e88181fdb5fd2d641c5658d457750b556e66e0e49f9c"} err="failed to get container status \"44c646debec275dd1535e88181fdb5fd2d641c5658d457750b556e66e0e49f9c\": rpc error: code = NotFound desc = could not find container \"44c646debec275dd1535e88181fdb5fd2d641c5658d457750b556e66e0e49f9c\": container with ID starting with 44c646debec275dd1535e88181fdb5fd2d641c5658d457750b556e66e0e49f9c not found: ID does not exist"
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.076084 5037 scope.go:117] "RemoveContainer" containerID="beda4050355ce07612aa16319836730cd34f5144da51fd3a490c0809545b5431"
Nov 26 14:18:58 crc kubenswrapper[5037]: E1126 14:18:58.076626 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"beda4050355ce07612aa16319836730cd34f5144da51fd3a490c0809545b5431\": container with ID starting with beda4050355ce07612aa16319836730cd34f5144da51fd3a490c0809545b5431 not found: ID does not exist" containerID="beda4050355ce07612aa16319836730cd34f5144da51fd3a490c0809545b5431"
Nov 26 14:18:58 crc kubenswrapper[5037]: I1126 14:18:58.076680 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"beda4050355ce07612aa16319836730cd34f5144da51fd3a490c0809545b5431"} err="failed to get container status \"beda4050355ce07612aa16319836730cd34f5144da51fd3a490c0809545b5431\": rpc error: code = NotFound desc = could not find container \"beda4050355ce07612aa16319836730cd34f5144da51fd3a490c0809545b5431\": container with ID starting with beda4050355ce07612aa16319836730cd34f5144da51fd3a490c0809545b5431 not found: ID does not exist"
Nov 26 14:18:59 crc kubenswrapper[5037]: I1126 14:18:59.035082 5037 generic.go:334] "Generic (PLEG): container finished" podID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" containerID="48eaea061f738b3b8ba79fb9d0f1cdcb3376a1de5e8b59d2cbe4dd7ac72c4bd5" exitCode=0
Nov 26 14:18:59 crc kubenswrapper[5037]: I1126 14:18:59.035169 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np59v" event={"ID":"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8","Type":"ContainerDied","Data":"48eaea061f738b3b8ba79fb9d0f1cdcb3376a1de5e8b59d2cbe4dd7ac72c4bd5"}
Nov 26 14:18:59 crc kubenswrapper[5037]: I1126 14:18:59.916061 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="707b088e-aa31-4988-8677-6dcac9117725" path="/var/lib/kubelet/pods/707b088e-aa31-4988-8677-6dcac9117725/volumes"
Nov 26 14:19:00 crc kubenswrapper[5037]: I1126 14:19:00.044304 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np59v" event={"ID":"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8","Type":"ContainerStarted","Data":"6410eaf5358c5195b9b796ceaf38ee84101066c9c4b815c3fd5c3457ea38dc68"}
Nov 26 14:19:02 crc kubenswrapper[5037]: I1126 14:19:02.811344 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-m8gz7"
Nov 26 14:19:02 crc kubenswrapper[5037]: I1126 14:19:02.812442 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-m8gz7"
Nov 26 14:19:02 crc kubenswrapper[5037]: I1126 14:19:02.856486 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-m8gz7"
Nov 26 14:19:02 crc kubenswrapper[5037]: I1126 14:19:02.903104 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-np59v" podStartSLOduration=5.75818716 podStartE2EDuration="57.903081671s" podCreationTimestamp="2025-11-26 14:18:05 +0000 UTC" firstStartedPulling="2025-11-26 14:18:07.358978359 +0000 UTC m=+154.155748543" lastFinishedPulling="2025-11-26 14:18:59.50387286 +0000 UTC m=+206.300643054" observedRunningTime="2025-11-26 14:19:00.068689669 +0000 UTC m=+206.865459873" watchObservedRunningTime="2025-11-26 14:19:02.903081671 +0000 UTC m=+209.699851855"
Nov 26 14:19:03 crc kubenswrapper[5037]: I1126 14:19:03.119765 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-m8gz7"
Nov 26 14:19:04 crc kubenswrapper[5037]: I1126 14:19:04.655164 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:19:04 crc kubenswrapper[5037]: I1126 14:19:04.655219 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:19:04 crc kubenswrapper[5037]: I1126 14:19:04.702989 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:19:05 crc kubenswrapper[5037]: I1126 14:19:05.126746 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-msn7s"
Nov 26 14:19:05 crc kubenswrapper[5037]: I1126 14:19:05.841335 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-b5sj7"
Nov 26 14:19:05 crc kubenswrapper[5037]: I1126 14:19:05.842747 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b5sj7"
Nov 26 14:19:05 crc kubenswrapper[5037]: I1126 14:19:05.917501 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b5sj7"
Nov 26 14:19:06 crc kubenswrapper[5037]: I1126 14:19:06.178687 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b5sj7"
Nov 26 14:19:06 crc kubenswrapper[5037]: I1126 14:19:06.183137 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-np59v"
Nov 26 14:19:06 crc kubenswrapper[5037]: I1126 14:19:06.183253 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-np59v"
Nov 26 14:19:06 crc kubenswrapper[5037]: I1126 14:19:06.280612 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-np59v"
Nov 26 14:19:07 crc kubenswrapper[5037]: I1126 14:19:07.149641 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-np59v"
Nov 26 14:19:08 crc kubenswrapper[5037]: I1126 14:19:08.152658 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-np59v"]
Nov 26 14:19:09 crc kubenswrapper[5037]: I1126 14:19:09.113910 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-np59v" podUID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" containerName="registry-server" containerID="cri-o://6410eaf5358c5195b9b796ceaf38ee84101066c9c4b815c3fd5c3457ea38dc68" gracePeriod=2
Nov 26 14:19:09 crc kubenswrapper[5037]: I1126 14:19:09.465176 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-np59v"
Nov 26 14:19:09 crc kubenswrapper[5037]: I1126 14:19:09.529224 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-utilities\") pod \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\" (UID: \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\") "
Nov 26 14:19:09 crc kubenswrapper[5037]: I1126 14:19:09.529399 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcdvd\" (UniqueName: \"kubernetes.io/projected/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-kube-api-access-dcdvd\") pod \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\" (UID: \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\") "
Nov 26 14:19:09 crc kubenswrapper[5037]: I1126 14:19:09.530223 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-utilities" (OuterVolumeSpecName: "utilities") pod "a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" (UID: "a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:19:09 crc kubenswrapper[5037]: I1126 14:19:09.530851 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-catalog-content\") pod \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\" (UID: \"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8\") "
Nov 26 14:19:09 crc kubenswrapper[5037]: I1126 14:19:09.531161 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 14:19:09 crc kubenswrapper[5037]: I1126 14:19:09.535771 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-kube-api-access-dcdvd" (OuterVolumeSpecName: "kube-api-access-dcdvd") pod "a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" (UID: "a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8"). InnerVolumeSpecName "kube-api-access-dcdvd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:19:09 crc kubenswrapper[5037]: I1126 14:19:09.632649 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcdvd\" (UniqueName: \"kubernetes.io/projected/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-kube-api-access-dcdvd\") on node \"crc\" DevicePath \"\""
Nov 26 14:19:09 crc kubenswrapper[5037]: I1126 14:19:09.967753 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" (UID: "a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.040117 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.126645 5037 generic.go:334] "Generic (PLEG): container finished" podID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" containerID="6410eaf5358c5195b9b796ceaf38ee84101066c9c4b815c3fd5c3457ea38dc68" exitCode=0
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.126711 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np59v" event={"ID":"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8","Type":"ContainerDied","Data":"6410eaf5358c5195b9b796ceaf38ee84101066c9c4b815c3fd5c3457ea38dc68"}
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.126748 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-np59v" event={"ID":"a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8","Type":"ContainerDied","Data":"41704a4c1a34935b2fb8791ff052d4d912aee0f299eb6e5f74bcde7071c958e5"}
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.126794 5037 scope.go:117] "RemoveContainer" containerID="6410eaf5358c5195b9b796ceaf38ee84101066c9c4b815c3fd5c3457ea38dc68"
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.126908 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-np59v"
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.159440 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-np59v"]
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.162718 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-np59v"]
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.164160 5037 scope.go:117] "RemoveContainer" containerID="48eaea061f738b3b8ba79fb9d0f1cdcb3376a1de5e8b59d2cbe4dd7ac72c4bd5"
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.189564 5037 scope.go:117] "RemoveContainer" containerID="f56d1d8ed76a2d6938a10971022747efa0df312f78de4743d87c4b0c4012ceb7"
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.212819 5037 scope.go:117] "RemoveContainer" containerID="6410eaf5358c5195b9b796ceaf38ee84101066c9c4b815c3fd5c3457ea38dc68"
Nov 26 14:19:10 crc kubenswrapper[5037]: E1126 14:19:10.213442 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6410eaf5358c5195b9b796ceaf38ee84101066c9c4b815c3fd5c3457ea38dc68\": container with ID starting with 6410eaf5358c5195b9b796ceaf38ee84101066c9c4b815c3fd5c3457ea38dc68 not found: ID does not exist" containerID="6410eaf5358c5195b9b796ceaf38ee84101066c9c4b815c3fd5c3457ea38dc68"
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.213571 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6410eaf5358c5195b9b796ceaf38ee84101066c9c4b815c3fd5c3457ea38dc68"} err="failed to get container status \"6410eaf5358c5195b9b796ceaf38ee84101066c9c4b815c3fd5c3457ea38dc68\": rpc error: code = NotFound desc = could not find container \"6410eaf5358c5195b9b796ceaf38ee84101066c9c4b815c3fd5c3457ea38dc68\": container with ID starting with 6410eaf5358c5195b9b796ceaf38ee84101066c9c4b815c3fd5c3457ea38dc68 not found: ID does not exist"
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.213679 5037 scope.go:117] "RemoveContainer" containerID="48eaea061f738b3b8ba79fb9d0f1cdcb3376a1de5e8b59d2cbe4dd7ac72c4bd5"
Nov 26 14:19:10 crc kubenswrapper[5037]: E1126 14:19:10.214262 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48eaea061f738b3b8ba79fb9d0f1cdcb3376a1de5e8b59d2cbe4dd7ac72c4bd5\": container with ID starting with 48eaea061f738b3b8ba79fb9d0f1cdcb3376a1de5e8b59d2cbe4dd7ac72c4bd5 not found: ID does not exist" containerID="48eaea061f738b3b8ba79fb9d0f1cdcb3376a1de5e8b59d2cbe4dd7ac72c4bd5"
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.214391 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48eaea061f738b3b8ba79fb9d0f1cdcb3376a1de5e8b59d2cbe4dd7ac72c4bd5"} err="failed to get container status \"48eaea061f738b3b8ba79fb9d0f1cdcb3376a1de5e8b59d2cbe4dd7ac72c4bd5\": rpc error: code = NotFound desc = could not find container \"48eaea061f738b3b8ba79fb9d0f1cdcb3376a1de5e8b59d2cbe4dd7ac72c4bd5\": container with ID starting with 48eaea061f738b3b8ba79fb9d0f1cdcb3376a1de5e8b59d2cbe4dd7ac72c4bd5 not found: ID does not exist"
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.214464 5037 scope.go:117] "RemoveContainer" containerID="f56d1d8ed76a2d6938a10971022747efa0df312f78de4743d87c4b0c4012ceb7"
Nov 26 14:19:10 crc kubenswrapper[5037]: E1126 14:19:10.214877 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f56d1d8ed76a2d6938a10971022747efa0df312f78de4743d87c4b0c4012ceb7\": container with ID starting with f56d1d8ed76a2d6938a10971022747efa0df312f78de4743d87c4b0c4012ceb7 not found: ID does not exist" containerID="f56d1d8ed76a2d6938a10971022747efa0df312f78de4743d87c4b0c4012ceb7"
Nov 26 14:19:10 crc kubenswrapper[5037]: I1126 14:19:10.214918 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f56d1d8ed76a2d6938a10971022747efa0df312f78de4743d87c4b0c4012ceb7"} err="failed to get container status \"f56d1d8ed76a2d6938a10971022747efa0df312f78de4743d87c4b0c4012ceb7\": rpc error: code = NotFound desc = could not find container \"f56d1d8ed76a2d6938a10971022747efa0df312f78de4743d87c4b0c4012ceb7\": container with ID starting with f56d1d8ed76a2d6938a10971022747efa0df312f78de4743d87c4b0c4012ceb7 not found: ID does not exist"
Nov 26 14:19:11 crc kubenswrapper[5037]: I1126 14:19:11.248476 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:19:11 crc kubenswrapper[5037]: I1126 14:19:11.248576 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 14:19:11 crc kubenswrapper[5037]: I1126 14:19:11.248659 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d"
Nov 26 14:19:11 crc kubenswrapper[5037]: I1126 14:19:11.249605 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 14:19:11 crc kubenswrapper[5037]: I1126 14:19:11.249707 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713" gracePeriod=600
Nov 26 14:19:11 crc kubenswrapper[5037]: I1126 14:19:11.920832 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" path="/var/lib/kubelet/pods/a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8/volumes"
Nov 26 14:19:12 crc kubenswrapper[5037]: I1126 14:19:12.143647 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713" exitCode=0
Nov 26 14:19:12 crc kubenswrapper[5037]: I1126 14:19:12.143733 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713"}
Nov 26 14:19:12 crc kubenswrapper[5037]: I1126 14:19:12.143804 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"0ca155f1028f7449a83057d2b19d8707af01de25fbb8c44d82c60ea823dd3d64"} Nov 26 14:19:14 crc kubenswrapper[5037]: I1126 14:19:14.866695 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-c252f"] Nov 26 14:19:39 crc kubenswrapper[5037]: I1126 14:19:39.904519 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-c252f" podUID="13b910b7-69a1-438a-9ebe-d865adc99607" containerName="oauth-openshift" containerID="cri-o://2dbcbfd2f92c71a86a7587ebeb94d010882f9b81a0190c6c8ee23f35c57af1dd" gracePeriod=15 Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.260182 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.313677 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-7795679f96-zm6k2"] Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.313929 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" containerName="extract-content" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.313945 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" containerName="extract-content" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.313958 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa1cfda0-0f53-494b-beb2-8ec2c81fa533" containerName="pruner" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.313965 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa1cfda0-0f53-494b-beb2-8ec2c81fa533" containerName="pruner" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.313973 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="354f57d7-61fc-44f1-ab03-d7bab0a6a984" containerName="extract-content" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.313980 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="354f57d7-61fc-44f1-ab03-d7bab0a6a984" containerName="extract-content" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.313990 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13b910b7-69a1-438a-9ebe-d865adc99607" containerName="oauth-openshift" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.313997 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="13b910b7-69a1-438a-9ebe-d865adc99607" containerName="oauth-openshift" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.314005 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="707b088e-aa31-4988-8677-6dcac9117725" containerName="extract-content" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314015 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="707b088e-aa31-4988-8677-6dcac9117725" containerName="extract-content" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.314028 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" containerName="registry-server" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314035 5037 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" containerName="registry-server" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.314047 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6c8b303-75aa-4a87-a45c-fd1776689864" containerName="registry-server" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314055 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6c8b303-75aa-4a87-a45c-fd1776689864" containerName="registry-server" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.314067 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="707b088e-aa31-4988-8677-6dcac9117725" containerName="extract-utilities" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314077 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="707b088e-aa31-4988-8677-6dcac9117725" containerName="extract-utilities" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.314092 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="707b088e-aa31-4988-8677-6dcac9117725" containerName="registry-server" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314101 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="707b088e-aa31-4988-8677-6dcac9117725" containerName="registry-server" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.314111 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="354f57d7-61fc-44f1-ab03-d7bab0a6a984" containerName="registry-server" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314122 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="354f57d7-61fc-44f1-ab03-d7bab0a6a984" containerName="registry-server" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.314134 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="354f57d7-61fc-44f1-ab03-d7bab0a6a984" containerName="extract-utilities" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314143 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="354f57d7-61fc-44f1-ab03-d7bab0a6a984" containerName="extract-utilities" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.314154 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" containerName="extract-utilities" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314162 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" containerName="extract-utilities" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.314171 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6c8b303-75aa-4a87-a45c-fd1776689864" containerName="extract-content" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314178 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6c8b303-75aa-4a87-a45c-fd1776689864" containerName="extract-content" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.314192 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d896f7f-fe00-4729-89ef-1321f399a314" containerName="pruner" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314200 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d896f7f-fe00-4729-89ef-1321f399a314" containerName="pruner" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.314215 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6c8b303-75aa-4a87-a45c-fd1776689864" containerName="extract-utilities" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314223 5037 
state_mem.go:107] "Deleted CPUSet assignment" podUID="c6c8b303-75aa-4a87-a45c-fd1776689864" containerName="extract-utilities" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314353 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa1cfda0-0f53-494b-beb2-8ec2c81fa533" containerName="pruner" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314371 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="354f57d7-61fc-44f1-ab03-d7bab0a6a984" containerName="registry-server" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314381 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="707b088e-aa31-4988-8677-6dcac9117725" containerName="registry-server" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314391 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="13b910b7-69a1-438a-9ebe-d865adc99607" containerName="oauth-openshift" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314404 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6c8b303-75aa-4a87-a45c-fd1776689864" containerName="registry-server" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314416 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9e08d30-22dd-4fc2-ab4f-a742e2e9c3d8" containerName="registry-server" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314425 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d896f7f-fe00-4729-89ef-1321f399a314" containerName="pruner" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.314936 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.316684 5037 generic.go:334] "Generic (PLEG): container finished" podID="13b910b7-69a1-438a-9ebe-d865adc99607" containerID="2dbcbfd2f92c71a86a7587ebeb94d010882f9b81a0190c6c8ee23f35c57af1dd" exitCode=0 Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.316737 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-c252f" event={"ID":"13b910b7-69a1-438a-9ebe-d865adc99607","Type":"ContainerDied","Data":"2dbcbfd2f92c71a86a7587ebeb94d010882f9b81a0190c6c8ee23f35c57af1dd"} Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.316769 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-c252f" event={"ID":"13b910b7-69a1-438a-9ebe-d865adc99607","Type":"ContainerDied","Data":"fabbb490c1b57f17bd5f317af2abcffde728cb5cdc650d7ddc220dbbfd908c2c"} Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.316786 5037 scope.go:117] "RemoveContainer" containerID="2dbcbfd2f92c71a86a7587ebeb94d010882f9b81a0190c6c8ee23f35c57af1dd" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.316796 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-c252f" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.327924 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7795679f96-zm6k2"] Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.339939 5037 scope.go:117] "RemoveContainer" containerID="2dbcbfd2f92c71a86a7587ebeb94d010882f9b81a0190c6c8ee23f35c57af1dd" Nov 26 14:19:40 crc kubenswrapper[5037]: E1126 14:19:40.340551 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2dbcbfd2f92c71a86a7587ebeb94d010882f9b81a0190c6c8ee23f35c57af1dd\": container with ID starting with 2dbcbfd2f92c71a86a7587ebeb94d010882f9b81a0190c6c8ee23f35c57af1dd not found: ID does not exist" containerID="2dbcbfd2f92c71a86a7587ebeb94d010882f9b81a0190c6c8ee23f35c57af1dd" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.340616 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2dbcbfd2f92c71a86a7587ebeb94d010882f9b81a0190c6c8ee23f35c57af1dd"} err="failed to get container status \"2dbcbfd2f92c71a86a7587ebeb94d010882f9b81a0190c6c8ee23f35c57af1dd\": rpc error: code = NotFound desc = could not find container \"2dbcbfd2f92c71a86a7587ebeb94d010882f9b81a0190c6c8ee23f35c57af1dd\": container with ID starting with 2dbcbfd2f92c71a86a7587ebeb94d010882f9b81a0190c6c8ee23f35c57af1dd not found: ID does not exist" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.391954 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13b910b7-69a1-438a-9ebe-d865adc99607-audit-dir\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392023 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-login\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392066 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-serving-cert\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392096 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/13b910b7-69a1-438a-9ebe-d865adc99607-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392144 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsxzw\" (UniqueName: \"kubernetes.io/projected/13b910b7-69a1-438a-9ebe-d865adc99607-kube-api-access-rsxzw\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392185 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-cliconfig\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392217 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-session\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392239 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-error\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392268 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-trusted-ca-bundle\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392358 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-ocp-branding-template\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392395 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-router-certs\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392431 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-idp-0-file-data\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392466 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-service-ca\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc 
kubenswrapper[5037]: I1126 14:19:40.392490 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-provider-selection\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392508 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-audit-policies\") pod \"13b910b7-69a1-438a-9ebe-d865adc99607\" (UID: \"13b910b7-69a1-438a-9ebe-d865adc99607\") " Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392718 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-session\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392746 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-service-ca\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392797 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-user-template-error\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392819 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392837 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392857 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc 
kubenswrapper[5037]: I1126 14:19:40.392873 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gr9pg\" (UniqueName: \"kubernetes.io/projected/d1310d52-791d-4200-a162-8de8c40cf2dd-kube-api-access-gr9pg\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392892 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d1310d52-791d-4200-a162-8de8c40cf2dd-audit-dir\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392909 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d1310d52-791d-4200-a162-8de8c40cf2dd-audit-policies\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392926 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392964 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.392993 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.393012 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-router-certs\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.393026 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-user-template-login\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: 
\"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.393076 5037 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/13b910b7-69a1-438a-9ebe-d865adc99607-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.395131 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.395254 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.395688 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.396373 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.400247 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13b910b7-69a1-438a-9ebe-d865adc99607-kube-api-access-rsxzw" (OuterVolumeSpecName: "kube-api-access-rsxzw") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "kube-api-access-rsxzw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.400502 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.401164 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.406513 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.411968 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.412707 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.413391 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.414224 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.416743 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "13b910b7-69a1-438a-9ebe-d865adc99607" (UID: "13b910b7-69a1-438a-9ebe-d865adc99607"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.493995 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-user-template-error\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494100 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494128 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494148 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494168 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gr9pg\" (UniqueName: \"kubernetes.io/projected/d1310d52-791d-4200-a162-8de8c40cf2dd-kube-api-access-gr9pg\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494192 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d1310d52-791d-4200-a162-8de8c40cf2dd-audit-dir\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494216 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d1310d52-791d-4200-a162-8de8c40cf2dd-audit-policies\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494233 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc 
kubenswrapper[5037]: I1126 14:19:40.494295 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494326 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494346 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-router-certs\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494342 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d1310d52-791d-4200-a162-8de8c40cf2dd-audit-dir\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494369 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-user-template-login\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494586 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-session\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494626 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-service-ca\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494782 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494797 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494814 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsxzw\" (UniqueName: \"kubernetes.io/projected/13b910b7-69a1-438a-9ebe-d865adc99607-kube-api-access-rsxzw\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494830 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494845 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494856 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494873 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494890 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494904 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494918 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494930 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494946 5037 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/13b910b7-69a1-438a-9ebe-d865adc99607-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.494961 5037 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/13b910b7-69a1-438a-9ebe-d865adc99607-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.495193 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.495258 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d1310d52-791d-4200-a162-8de8c40cf2dd-audit-policies\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.495563 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-service-ca\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.496879 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.498111 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.498129 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-user-template-error\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.498943 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-user-template-login\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.499151 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-router-certs\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.499398 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.499546 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-system-session\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.505440 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.505502 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d1310d52-791d-4200-a162-8de8c40cf2dd-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.514489 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gr9pg\" (UniqueName: \"kubernetes.io/projected/d1310d52-791d-4200-a162-8de8c40cf2dd-kube-api-access-gr9pg\") pod \"oauth-openshift-7795679f96-zm6k2\" (UID: \"d1310d52-791d-4200-a162-8de8c40cf2dd\") " pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.629673 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.646676 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-c252f"] Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.651855 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-c252f"] Nov 26 14:19:40 crc kubenswrapper[5037]: I1126 14:19:40.873249 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7795679f96-zm6k2"] Nov 26 14:19:41 crc kubenswrapper[5037]: I1126 14:19:41.326414 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" event={"ID":"d1310d52-791d-4200-a162-8de8c40cf2dd","Type":"ContainerStarted","Data":"cbdc80c53842b7e405506752dacf626d88a8718bff91afa68638db4f9462b246"} Nov 26 14:19:41 crc kubenswrapper[5037]: I1126 14:19:41.327103 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:41 crc kubenswrapper[5037]: I1126 14:19:41.327162 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" event={"ID":"d1310d52-791d-4200-a162-8de8c40cf2dd","Type":"ContainerStarted","Data":"2b876a55d2b092ec91bb38b2d0b01ef3e53fd45695b3aea8d4563db39f0da3f7"} Nov 26 14:19:41 crc kubenswrapper[5037]: I1126 14:19:41.350167 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" podStartSLOduration=27.350138236 podStartE2EDuration="27.350138236s" podCreationTimestamp="2025-11-26 14:19:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:19:41.347262453 +0000 UTC m=+248.144032737" watchObservedRunningTime="2025-11-26 14:19:41.350138236 +0000 UTC m=+248.146908420" Nov 26 14:19:41 crc kubenswrapper[5037]: I1126 14:19:41.549456 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-7795679f96-zm6k2" Nov 26 14:19:41 crc kubenswrapper[5037]: I1126 14:19:41.921642 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13b910b7-69a1-438a-9ebe-d865adc99607" path="/var/lib/kubelet/pods/13b910b7-69a1-438a-9ebe-d865adc99607/volumes" Nov 26 14:20:33 crc kubenswrapper[5037]: I1126 14:20:33.679292 5037 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Nov 26 14:21:11 crc kubenswrapper[5037]: I1126 14:21:11.248102 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:21:11 crc kubenswrapper[5037]: I1126 14:21:11.250554 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:21:41 crc kubenswrapper[5037]: I1126 14:21:41.248045 5037 
patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:21:41 crc kubenswrapper[5037]: I1126 14:21:41.249498 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:22:11 crc kubenswrapper[5037]: I1126 14:22:11.247612 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:22:11 crc kubenswrapper[5037]: I1126 14:22:11.248799 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:22:11 crc kubenswrapper[5037]: I1126 14:22:11.248883 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:22:11 crc kubenswrapper[5037]: I1126 14:22:11.249928 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0ca155f1028f7449a83057d2b19d8707af01de25fbb8c44d82c60ea823dd3d64"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 14:22:11 crc kubenswrapper[5037]: I1126 14:22:11.250046 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://0ca155f1028f7449a83057d2b19d8707af01de25fbb8c44d82c60ea823dd3d64" gracePeriod=600 Nov 26 14:22:11 crc kubenswrapper[5037]: I1126 14:22:11.401074 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="0ca155f1028f7449a83057d2b19d8707af01de25fbb8c44d82c60ea823dd3d64" exitCode=0 Nov 26 14:22:11 crc kubenswrapper[5037]: I1126 14:22:11.401154 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"0ca155f1028f7449a83057d2b19d8707af01de25fbb8c44d82c60ea823dd3d64"} Nov 26 14:22:11 crc kubenswrapper[5037]: I1126 14:22:11.401212 5037 scope.go:117] "RemoveContainer" containerID="ae57cbd99d2dcba3594b74304119a4a8030da193dce32afd77079b3cfaf45713" Nov 26 14:22:12 crc kubenswrapper[5037]: I1126 14:22:12.413405 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" 
event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"b7afa716ab555c514aa4b783f55103f0b795f534b642704349668ad1f4f2718c"} Nov 26 14:23:28 crc kubenswrapper[5037]: I1126 14:23:28.063404 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 14:23:28 crc kubenswrapper[5037]: I1126 14:23:28.066439 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 14:23:28 crc kubenswrapper[5037]: I1126 14:23:28.071193 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 14:23:28 crc kubenswrapper[5037]: I1126 14:23:28.073503 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 14:23:28 crc kubenswrapper[5037]: I1126 14:23:28.091783 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 14:23:28 crc kubenswrapper[5037]: I1126 14:23:28.194401 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2e669d88-4513-42ff-b722-a7c9730922f7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2e669d88-4513-42ff-b722-a7c9730922f7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 14:23:28 crc kubenswrapper[5037]: I1126 14:23:28.194482 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2e669d88-4513-42ff-b722-a7c9730922f7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2e669d88-4513-42ff-b722-a7c9730922f7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 14:23:28 crc kubenswrapper[5037]: I1126 14:23:28.296023 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2e669d88-4513-42ff-b722-a7c9730922f7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2e669d88-4513-42ff-b722-a7c9730922f7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 14:23:28 crc kubenswrapper[5037]: I1126 14:23:28.296117 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2e669d88-4513-42ff-b722-a7c9730922f7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2e669d88-4513-42ff-b722-a7c9730922f7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 14:23:28 crc kubenswrapper[5037]: I1126 14:23:28.296224 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2e669d88-4513-42ff-b722-a7c9730922f7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2e669d88-4513-42ff-b722-a7c9730922f7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 14:23:28 crc kubenswrapper[5037]: I1126 14:23:28.322568 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2e669d88-4513-42ff-b722-a7c9730922f7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2e669d88-4513-42ff-b722-a7c9730922f7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 14:23:28 crc kubenswrapper[5037]: I1126 14:23:28.399185 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 14:23:28 crc kubenswrapper[5037]: I1126 14:23:28.635806 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 14:23:29 crc kubenswrapper[5037]: I1126 14:23:29.001521 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"2e669d88-4513-42ff-b722-a7c9730922f7","Type":"ContainerStarted","Data":"501154d22ad5aefe22c36a182404a03c1ca37dbec22b1f06e839cbfa4dc15b27"} Nov 26 14:23:30 crc kubenswrapper[5037]: I1126 14:23:30.013193 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"2e669d88-4513-42ff-b722-a7c9730922f7","Type":"ContainerStarted","Data":"f3b675a2eda58162e99611a581431f309f08a4d5ef021360e8adbd22d855c925"} Nov 26 14:23:30 crc kubenswrapper[5037]: I1126 14:23:30.044914 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=2.044884234 podStartE2EDuration="2.044884234s" podCreationTimestamp="2025-11-26 14:23:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:23:30.036346747 +0000 UTC m=+476.833116951" watchObservedRunningTime="2025-11-26 14:23:30.044884234 +0000 UTC m=+476.841654448" Nov 26 14:23:31 crc kubenswrapper[5037]: I1126 14:23:31.022963 5037 generic.go:334] "Generic (PLEG): container finished" podID="2e669d88-4513-42ff-b722-a7c9730922f7" containerID="f3b675a2eda58162e99611a581431f309f08a4d5ef021360e8adbd22d855c925" exitCode=0 Nov 26 14:23:31 crc kubenswrapper[5037]: I1126 14:23:31.023050 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"2e669d88-4513-42ff-b722-a7c9730922f7","Type":"ContainerDied","Data":"f3b675a2eda58162e99611a581431f309f08a4d5ef021360e8adbd22d855c925"} Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.331468 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.457055 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2e669d88-4513-42ff-b722-a7c9730922f7-kubelet-dir\") pod \"2e669d88-4513-42ff-b722-a7c9730922f7\" (UID: \"2e669d88-4513-42ff-b722-a7c9730922f7\") " Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.457222 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2e669d88-4513-42ff-b722-a7c9730922f7-kube-api-access\") pod \"2e669d88-4513-42ff-b722-a7c9730922f7\" (UID: \"2e669d88-4513-42ff-b722-a7c9730922f7\") " Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.457259 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e669d88-4513-42ff-b722-a7c9730922f7-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "2e669d88-4513-42ff-b722-a7c9730922f7" (UID: "2e669d88-4513-42ff-b722-a7c9730922f7"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.457598 5037 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2e669d88-4513-42ff-b722-a7c9730922f7-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.467711 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e669d88-4513-42ff-b722-a7c9730922f7-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "2e669d88-4513-42ff-b722-a7c9730922f7" (UID: "2e669d88-4513-42ff-b722-a7c9730922f7"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.558884 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2e669d88-4513-42ff-b722-a7c9730922f7-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.632081 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 14:23:32 crc kubenswrapper[5037]: E1126 14:23:32.632382 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e669d88-4513-42ff-b722-a7c9730922f7" containerName="pruner" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.632402 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e669d88-4513-42ff-b722-a7c9730922f7" containerName="pruner" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.632544 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e669d88-4513-42ff-b722-a7c9730922f7" containerName="pruner" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.633271 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.641944 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.761009 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/287ee161-b8a6-4dbf-b2e7-d32380e75f47-var-lock\") pod \"installer-9-crc\" (UID: \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.761563 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/287ee161-b8a6-4dbf-b2e7-d32380e75f47-kubelet-dir\") pod \"installer-9-crc\" (UID: \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.761600 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/287ee161-b8a6-4dbf-b2e7-d32380e75f47-kube-api-access\") pod \"installer-9-crc\" (UID: \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.862831 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/287ee161-b8a6-4dbf-b2e7-d32380e75f47-var-lock\") pod \"installer-9-crc\" (UID: \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.862941 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/287ee161-b8a6-4dbf-b2e7-d32380e75f47-kubelet-dir\") pod \"installer-9-crc\" (UID: \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.862981 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/287ee161-b8a6-4dbf-b2e7-d32380e75f47-kube-api-access\") pod \"installer-9-crc\" (UID: \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.862983 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/287ee161-b8a6-4dbf-b2e7-d32380e75f47-var-lock\") pod \"installer-9-crc\" (UID: \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.863072 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/287ee161-b8a6-4dbf-b2e7-d32380e75f47-kubelet-dir\") pod \"installer-9-crc\" (UID: \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.887376 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/287ee161-b8a6-4dbf-b2e7-d32380e75f47-kube-api-access\") pod \"installer-9-crc\" (UID: 
\"287ee161-b8a6-4dbf-b2e7-d32380e75f47\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 14:23:32 crc kubenswrapper[5037]: I1126 14:23:32.971045 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 14:23:33 crc kubenswrapper[5037]: I1126 14:23:33.049081 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"2e669d88-4513-42ff-b722-a7c9730922f7","Type":"ContainerDied","Data":"501154d22ad5aefe22c36a182404a03c1ca37dbec22b1f06e839cbfa4dc15b27"} Nov 26 14:23:33 crc kubenswrapper[5037]: I1126 14:23:33.049126 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="501154d22ad5aefe22c36a182404a03c1ca37dbec22b1f06e839cbfa4dc15b27" Nov 26 14:23:33 crc kubenswrapper[5037]: I1126 14:23:33.049176 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 14:23:33 crc kubenswrapper[5037]: I1126 14:23:33.168682 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 14:23:33 crc kubenswrapper[5037]: W1126 14:23:33.176632 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod287ee161_b8a6_4dbf_b2e7_d32380e75f47.slice/crio-947ade94ff2a9c9c96eb0ff8d636a2270aead66b6ffd86fa53a4f1a4e9a77de6 WatchSource:0}: Error finding container 947ade94ff2a9c9c96eb0ff8d636a2270aead66b6ffd86fa53a4f1a4e9a77de6: Status 404 returned error can't find the container with id 947ade94ff2a9c9c96eb0ff8d636a2270aead66b6ffd86fa53a4f1a4e9a77de6 Nov 26 14:23:34 crc kubenswrapper[5037]: I1126 14:23:34.057914 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"287ee161-b8a6-4dbf-b2e7-d32380e75f47","Type":"ContainerStarted","Data":"189eea5b9195529a9a3a54f3b1a9a3a08a8f9607c51a75936853973cd16d6a1c"} Nov 26 14:23:34 crc kubenswrapper[5037]: I1126 14:23:34.059403 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"287ee161-b8a6-4dbf-b2e7-d32380e75f47","Type":"ContainerStarted","Data":"947ade94ff2a9c9c96eb0ff8d636a2270aead66b6ffd86fa53a4f1a4e9a77de6"} Nov 26 14:23:34 crc kubenswrapper[5037]: I1126 14:23:34.079734 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.079716416 podStartE2EDuration="2.079716416s" podCreationTimestamp="2025-11-26 14:23:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:23:34.077003977 +0000 UTC m=+480.873774191" watchObservedRunningTime="2025-11-26 14:23:34.079716416 +0000 UTC m=+480.876486600" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.649184 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kch98"] Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.652109 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.664158 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kch98"] Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.780696 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/702e0571-5540-45c5-88fc-b2593f32751c-registry-tls\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.780762 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/702e0571-5540-45c5-88fc-b2593f32751c-bound-sa-token\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.780784 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/702e0571-5540-45c5-88fc-b2593f32751c-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.780811 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/702e0571-5540-45c5-88fc-b2593f32751c-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.780848 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.780867 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z45cx\" (UniqueName: \"kubernetes.io/projected/702e0571-5540-45c5-88fc-b2593f32751c-kube-api-access-z45cx\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.780887 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/702e0571-5540-45c5-88fc-b2593f32751c-trusted-ca\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.780910 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/702e0571-5540-45c5-88fc-b2593f32751c-registry-certificates\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.805723 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.882004 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/702e0571-5540-45c5-88fc-b2593f32751c-bound-sa-token\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.882064 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/702e0571-5540-45c5-88fc-b2593f32751c-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.882128 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/702e0571-5540-45c5-88fc-b2593f32751c-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.882162 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z45cx\" (UniqueName: \"kubernetes.io/projected/702e0571-5540-45c5-88fc-b2593f32751c-kube-api-access-z45cx\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.882183 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/702e0571-5540-45c5-88fc-b2593f32751c-trusted-ca\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.882206 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/702e0571-5540-45c5-88fc-b2593f32751c-registry-certificates\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.882250 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/702e0571-5540-45c5-88fc-b2593f32751c-registry-tls\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.883151 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/702e0571-5540-45c5-88fc-b2593f32751c-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.883921 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/702e0571-5540-45c5-88fc-b2593f32751c-registry-certificates\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.884874 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/702e0571-5540-45c5-88fc-b2593f32751c-trusted-ca\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.888732 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/702e0571-5540-45c5-88fc-b2593f32751c-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.889473 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/702e0571-5540-45c5-88fc-b2593f32751c-registry-tls\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.899082 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/702e0571-5540-45c5-88fc-b2593f32751c-bound-sa-token\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.899762 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z45cx\" (UniqueName: \"kubernetes.io/projected/702e0571-5540-45c5-88fc-b2593f32751c-kube-api-access-z45cx\") pod \"image-registry-66df7c8f76-kch98\" (UID: \"702e0571-5540-45c5-88fc-b2593f32751c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:36 crc kubenswrapper[5037]: I1126 14:23:36.970327 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:37 crc kubenswrapper[5037]: I1126 14:23:37.414969 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kch98"] Nov 26 14:23:37 crc kubenswrapper[5037]: W1126 14:23:37.423861 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod702e0571_5540_45c5_88fc_b2593f32751c.slice/crio-982f45b622a0a9e60a3c7a656cfb25684a1bbf21a558d4599075651a3b372a20 WatchSource:0}: Error finding container 982f45b622a0a9e60a3c7a656cfb25684a1bbf21a558d4599075651a3b372a20: Status 404 returned error can't find the container with id 982f45b622a0a9e60a3c7a656cfb25684a1bbf21a558d4599075651a3b372a20 Nov 26 14:23:38 crc kubenswrapper[5037]: I1126 14:23:38.108442 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-kch98" event={"ID":"702e0571-5540-45c5-88fc-b2593f32751c","Type":"ContainerStarted","Data":"3de5d836de27e0de03bab97bf6896ab8cf26c222bddf6c2f728cdf9e7554a19a"} Nov 26 14:23:38 crc kubenswrapper[5037]: I1126 14:23:38.108911 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:38 crc kubenswrapper[5037]: I1126 14:23:38.108928 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-kch98" event={"ID":"702e0571-5540-45c5-88fc-b2593f32751c","Type":"ContainerStarted","Data":"982f45b622a0a9e60a3c7a656cfb25684a1bbf21a558d4599075651a3b372a20"} Nov 26 14:23:38 crc kubenswrapper[5037]: I1126 14:23:38.133980 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-kch98" podStartSLOduration=2.13395532 podStartE2EDuration="2.13395532s" podCreationTimestamp="2025-11-26 14:23:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:23:38.129738783 +0000 UTC m=+484.926508987" watchObservedRunningTime="2025-11-26 14:23:38.13395532 +0000 UTC m=+484.930725514" Nov 26 14:23:56 crc kubenswrapper[5037]: I1126 14:23:56.977092 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-kch98" Nov 26 14:23:57 crc kubenswrapper[5037]: I1126 14:23:57.045912 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-nwzvj"] Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.248151 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.249074 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.416930 5037 kubelet.go:2421] "SyncLoop ADD" source="file" 
pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.417961 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.418552 5037 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.419103 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a" gracePeriod=15 Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.419120 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4" gracePeriod=15 Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.419213 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8" gracePeriod=15 Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.419320 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375" gracePeriod=15 Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.419323 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a" gracePeriod=15 Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.419742 5037 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 14:24:11 crc kubenswrapper[5037]: E1126 14:24:11.419929 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.419943 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 14:24:11 crc kubenswrapper[5037]: E1126 14:24:11.419955 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.419961 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 14:24:11 crc kubenswrapper[5037]: E1126 14:24:11.419972 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-insecure-readyz" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.419979 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 14:24:11 crc kubenswrapper[5037]: E1126 14:24:11.419990 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.419997 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 14:24:11 crc kubenswrapper[5037]: E1126 14:24:11.420008 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.420017 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 14:24:11 crc kubenswrapper[5037]: E1126 14:24:11.420031 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.420037 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.420152 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.420162 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.420170 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.420178 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.420187 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.420200 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 14:24:11 crc kubenswrapper[5037]: E1126 14:24:11.420316 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.420328 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 14:24:11 crc kubenswrapper[5037]: E1126 14:24:11.502058 5037 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.69:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.604267 5037 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.604427 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.604460 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.604516 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.604541 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.604568 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.604604 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.604719 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.706410 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.706884 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.707008 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.706596 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.706955 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.707211 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.707317 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.707412 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.707536 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.707661 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:11 crc 
kubenswrapper[5037]: I1126 14:24:11.707605 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.707725 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.707834 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.707965 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.708106 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.708313 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.787674 5037 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" start-of-body= Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.788662 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" Nov 26 14:24:11 crc kubenswrapper[5037]: I1126 14:24:11.805170 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:11 crc kubenswrapper[5037]: E1126 14:24:11.832545 5037 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.69:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b949630e3f67e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 14:24:11.831932542 +0000 UTC m=+518.628702726,LastTimestamp:2025-11-26 14:24:11.831932542 +0000 UTC m=+518.628702726,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 14:24:11 crc kubenswrapper[5037]: E1126 14:24:11.942528 5037 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.69:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b949630e3f67e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 14:24:11.831932542 +0000 UTC m=+518.628702726,LastTimestamp:2025-11-26 14:24:11.831932542 +0000 UTC m=+518.628702726,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 14:24:12 crc kubenswrapper[5037]: I1126 14:24:12.352548 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"eb8d9e8fc948bdfa392c4f090012c1962aaad4ff957160f49ebc9d923801c698"} Nov 26 14:24:12 crc kubenswrapper[5037]: I1126 14:24:12.353000 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"0da726dafc85c65016d5979914f4f82556f3f501746eada8564093a68f9013f7"} Nov 26 14:24:12 crc kubenswrapper[5037]: E1126 14:24:12.354124 5037 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.69:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:12 crc kubenswrapper[5037]: 
I1126 14:24:12.355581 5037 generic.go:334] "Generic (PLEG): container finished" podID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" containerID="189eea5b9195529a9a3a54f3b1a9a3a08a8f9607c51a75936853973cd16d6a1c" exitCode=0 Nov 26 14:24:12 crc kubenswrapper[5037]: I1126 14:24:12.355676 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"287ee161-b8a6-4dbf-b2e7-d32380e75f47","Type":"ContainerDied","Data":"189eea5b9195529a9a3a54f3b1a9a3a08a8f9607c51a75936853973cd16d6a1c"} Nov 26 14:24:12 crc kubenswrapper[5037]: I1126 14:24:12.356776 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:12 crc kubenswrapper[5037]: I1126 14:24:12.358697 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 14:24:12 crc kubenswrapper[5037]: I1126 14:24:12.363638 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 14:24:12 crc kubenswrapper[5037]: I1126 14:24:12.364605 5037 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375" exitCode=0 Nov 26 14:24:12 crc kubenswrapper[5037]: I1126 14:24:12.364764 5037 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4" exitCode=0 Nov 26 14:24:12 crc kubenswrapper[5037]: I1126 14:24:12.364785 5037 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8" exitCode=0 Nov 26 14:24:12 crc kubenswrapper[5037]: I1126 14:24:12.364797 5037 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a" exitCode=2 Nov 26 14:24:12 crc kubenswrapper[5037]: I1126 14:24:12.364728 5037 scope.go:117] "RemoveContainer" containerID="afdeeeaed68a317ccf777cfdd41e25b4cb72ce6194e6706752e37430f96c992e" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.381867 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.747833 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.749406 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.833643 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.835132 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.835768 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.836045 5037 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.851527 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/287ee161-b8a6-4dbf-b2e7-d32380e75f47-var-lock\") pod \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\" (UID: \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\") " Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.851664 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/287ee161-b8a6-4dbf-b2e7-d32380e75f47-kube-api-access\") pod \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\" (UID: \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\") " Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.851695 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/287ee161-b8a6-4dbf-b2e7-d32380e75f47-kubelet-dir\") pod \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\" (UID: \"287ee161-b8a6-4dbf-b2e7-d32380e75f47\") " Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.852053 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/287ee161-b8a6-4dbf-b2e7-d32380e75f47-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "287ee161-b8a6-4dbf-b2e7-d32380e75f47" (UID: "287ee161-b8a6-4dbf-b2e7-d32380e75f47"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.852099 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/287ee161-b8a6-4dbf-b2e7-d32380e75f47-var-lock" (OuterVolumeSpecName: "var-lock") pod "287ee161-b8a6-4dbf-b2e7-d32380e75f47" (UID: "287ee161-b8a6-4dbf-b2e7-d32380e75f47"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.859025 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/287ee161-b8a6-4dbf-b2e7-d32380e75f47-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "287ee161-b8a6-4dbf-b2e7-d32380e75f47" (UID: "287ee161-b8a6-4dbf-b2e7-d32380e75f47"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.911475 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.911746 5037 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.952690 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.952809 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.952816 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.952918 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.953102 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.953246 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.953452 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/287ee161-b8a6-4dbf-b2e7-d32380e75f47-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.953482 5037 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/287ee161-b8a6-4dbf-b2e7-d32380e75f47-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.953498 5037 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.953513 5037 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/287ee161-b8a6-4dbf-b2e7-d32380e75f47-var-lock\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.953525 5037 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:13 crc kubenswrapper[5037]: I1126 14:24:13.953536 5037 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.392481 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"287ee161-b8a6-4dbf-b2e7-d32380e75f47","Type":"ContainerDied","Data":"947ade94ff2a9c9c96eb0ff8d636a2270aead66b6ffd86fa53a4f1a4e9a77de6"} Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.392980 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="947ade94ff2a9c9c96eb0ff8d636a2270aead66b6ffd86fa53a4f1a4e9a77de6" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.392553 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.397828 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.399467 5037 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a" exitCode=0 Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.399537 5037 scope.go:117] "RemoveContainer" containerID="1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.399708 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.400040 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.400412 5037 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.400671 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.430775 5037 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.431371 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.433025 5037 scope.go:117] "RemoveContainer" containerID="a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.456200 5037 scope.go:117] "RemoveContainer" containerID="092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.483771 5037 scope.go:117] "RemoveContainer" containerID="975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.501097 5037 scope.go:117] "RemoveContainer" containerID="b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.519263 5037 scope.go:117] "RemoveContainer" containerID="b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.539665 5037 scope.go:117] "RemoveContainer" containerID="1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375" Nov 26 14:24:14 crc kubenswrapper[5037]: E1126 14:24:14.540317 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\": container with ID starting with 1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375 not found: ID does not exist" containerID="1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375" Nov 26 14:24:14 crc 
kubenswrapper[5037]: I1126 14:24:14.540386 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375"} err="failed to get container status \"1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\": rpc error: code = NotFound desc = could not find container \"1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375\": container with ID starting with 1755181c055a1df3743bb0924468d36be10ca00cbdec249c4a3cfb60a679c375 not found: ID does not exist" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.540419 5037 scope.go:117] "RemoveContainer" containerID="a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4" Nov 26 14:24:14 crc kubenswrapper[5037]: E1126 14:24:14.540869 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\": container with ID starting with a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4 not found: ID does not exist" containerID="a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.540912 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4"} err="failed to get container status \"a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\": rpc error: code = NotFound desc = could not find container \"a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4\": container with ID starting with a7fe9a02d1efc398b5c19a29e3bb04c877c5a2f6b61aa562e1be8412549ea3b4 not found: ID does not exist" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.540945 5037 scope.go:117] "RemoveContainer" containerID="092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8" Nov 26 14:24:14 crc kubenswrapper[5037]: E1126 14:24:14.541390 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\": container with ID starting with 092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8 not found: ID does not exist" containerID="092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.541435 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8"} err="failed to get container status \"092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\": rpc error: code = NotFound desc = could not find container \"092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8\": container with ID starting with 092083f7c112928add7f8bf428a91906bb42a82fdbcc80dd3f2ef493cfebc9c8 not found: ID does not exist" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.541453 5037 scope.go:117] "RemoveContainer" containerID="975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a" Nov 26 14:24:14 crc kubenswrapper[5037]: E1126 14:24:14.541804 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\": container with ID starting with 
975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a not found: ID does not exist" containerID="975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.541933 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a"} err="failed to get container status \"975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\": rpc error: code = NotFound desc = could not find container \"975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a\": container with ID starting with 975e9ecc0de39a4f7ccceb9c4440be49b9a46fae4b7f0647664e8b815d64ed0a not found: ID does not exist" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.541951 5037 scope.go:117] "RemoveContainer" containerID="b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a" Nov 26 14:24:14 crc kubenswrapper[5037]: E1126 14:24:14.542226 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\": container with ID starting with b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a not found: ID does not exist" containerID="b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.542250 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a"} err="failed to get container status \"b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\": rpc error: code = NotFound desc = could not find container \"b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a\": container with ID starting with b09e637e387d7bd073da062d15555df2f1d1b242ad114ee07345c81fbfb89d2a not found: ID does not exist" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.542264 5037 scope.go:117] "RemoveContainer" containerID="b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c" Nov 26 14:24:14 crc kubenswrapper[5037]: E1126 14:24:14.542573 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\": container with ID starting with b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c not found: ID does not exist" containerID="b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c" Nov 26 14:24:14 crc kubenswrapper[5037]: I1126 14:24:14.542595 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c"} err="failed to get container status \"b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\": rpc error: code = NotFound desc = could not find container \"b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c\": container with ID starting with b20026aea4ba7b5f18f7d39d42fc885fc5894c17496aa5555e8bf8d8d3b27a1c not found: ID does not exist" Nov 26 14:24:15 crc kubenswrapper[5037]: I1126 14:24:15.917899 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 26 14:24:18 crc kubenswrapper[5037]: E1126 14:24:18.800612 5037 
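The RemoveContainer / "ContainerStatus from runtime service failed" pairs above are benign: the kubelet asks the CRI runtime to delete containers it has already removed, gets gRPC NotFound back, logs it, and moves on. A minimal sketch of that idempotent-deletion pattern, using a hypothetical ContainerRuntime stand-in rather than the kubelet's actual CRI client:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// ContainerRuntime is a stand-in for a CRI client; only the method
// shape matters for this sketch.
type ContainerRuntime interface {
	RemoveContainer(ctx context.Context, id string) error
}

// removeIfPresent deletes a container but treats gRPC NotFound as
// success, mirroring how the "ID does not exist" errors in the log
// are recorded and then ignored rather than retried.
func removeIfPresent(ctx context.Context, rt ContainerRuntime, id string) error {
	err := rt.RemoveContainer(ctx, id)
	if status.Code(err) == codes.NotFound {
		fmt.Printf("container %s already gone, nothing to do\n", id)
		return nil // already removed: deletion is idempotent
	}
	return err // nil on success, real error otherwise
}
```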
Nov 26 14:24:18 crc kubenswrapper[5037]: E1126 14:24:18.800612 5037 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused"
Nov 26 14:24:18 crc kubenswrapper[5037]: E1126 14:24:18.801658 5037 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused"
Nov 26 14:24:18 crc kubenswrapper[5037]: E1126 14:24:18.802346 5037 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused"
Nov 26 14:24:18 crc kubenswrapper[5037]: E1126 14:24:18.802719 5037 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused"
Nov 26 14:24:18 crc kubenswrapper[5037]: E1126 14:24:18.803064 5037 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused"
Nov 26 14:24:18 crc kubenswrapper[5037]: I1126 14:24:18.803110 5037 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Nov 26 14:24:18 crc kubenswrapper[5037]: E1126 14:24:18.803420 5037 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused" interval="200ms"
Nov 26 14:24:19 crc kubenswrapper[5037]: E1126 14:24:19.006568 5037 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused" interval="400ms"
Nov 26 14:24:19 crc kubenswrapper[5037]: E1126 14:24:19.408241 5037 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused" interval="800ms"
Nov 26 14:24:20 crc kubenswrapper[5037]: E1126 14:24:20.209159 5037 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused" interval="1.6s"
Nov 26 14:24:21 crc kubenswrapper[5037]: E1126 14:24:21.809771 5037 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.69:6443: connect: connection refused" interval="3.2s"
Nov 26 14:24:21 crc kubenswrapper[5037]: E1126 14:24:21.943779 5037 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.69:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b949630e3f67e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 14:24:11.831932542 +0000 UTC m=+518.628702726,LastTimestamp:2025-11-26 14:24:11.831932542 +0000 UTC m=+518.628702726,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.101141 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" podUID="fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" containerName="registry" containerID="cri-o://ea5c494f720ea12b84450a4cac184760c05ae2899c834297277760cdcc4d86dd" gracePeriod=30
Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.439611 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.440786 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused"
Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.441209 5037 status_manager.go:851] "Failed to get status for pod" podUID="fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-nwzvj\": dial tcp 38.102.83.69:6443: connect: connection refused"
Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.455921 5037 generic.go:334] "Generic (PLEG): container finished" podID="fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" containerID="ea5c494f720ea12b84450a4cac184760c05ae2899c834297277760cdcc4d86dd" exitCode=0
Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.455989 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj"
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.455983 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" event={"ID":"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb","Type":"ContainerDied","Data":"ea5c494f720ea12b84450a4cac184760c05ae2899c834297277760cdcc4d86dd"} Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.456164 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" event={"ID":"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb","Type":"ContainerDied","Data":"2c7dad4dd4ecbd8ecad7e4a8fda1da0a3998665191422d49bde2d4500a066460"} Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.456198 5037 scope.go:117] "RemoveContainer" containerID="ea5c494f720ea12b84450a4cac184760c05ae2899c834297277760cdcc4d86dd" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.456895 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.457702 5037 status_manager.go:851] "Failed to get status for pod" podUID="fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-nwzvj\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.476862 5037 scope.go:117] "RemoveContainer" containerID="ea5c494f720ea12b84450a4cac184760c05ae2899c834297277760cdcc4d86dd" Nov 26 14:24:22 crc kubenswrapper[5037]: E1126 14:24:22.477277 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea5c494f720ea12b84450a4cac184760c05ae2899c834297277760cdcc4d86dd\": container with ID starting with ea5c494f720ea12b84450a4cac184760c05ae2899c834297277760cdcc4d86dd not found: ID does not exist" containerID="ea5c494f720ea12b84450a4cac184760c05ae2899c834297277760cdcc4d86dd" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.477364 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea5c494f720ea12b84450a4cac184760c05ae2899c834297277760cdcc4d86dd"} err="failed to get container status \"ea5c494f720ea12b84450a4cac184760c05ae2899c834297277760cdcc4d86dd\": rpc error: code = NotFound desc = could not find container \"ea5c494f720ea12b84450a4cac184760c05ae2899c834297277760cdcc4d86dd\": container with ID starting with ea5c494f720ea12b84450a4cac184760c05ae2899c834297277760cdcc4d86dd not found: ID does not exist" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.487497 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-registry-tls\") pod \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.487766 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.487801 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-registry-certificates\") pod \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.487907 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7x8w\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-kube-api-access-w7x8w\") pod \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.487957 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-bound-sa-token\") pod \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.487978 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-trusted-ca\") pod \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.488013 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-ca-trust-extracted\") pod \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.488094 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-installation-pull-secrets\") pod \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\" (UID: \"fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb\") " Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.489515 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.489490 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.497709 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.497712 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-kube-api-access-w7x8w" (OuterVolumeSpecName: "kube-api-access-w7x8w") pod "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb"). InnerVolumeSpecName "kube-api-access-w7x8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.497914 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.498395 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.499551 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.508856 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" (UID: "fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.589815 5037 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.589871 5037 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.589898 5037 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.589918 5037 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.589937 5037 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.589956 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7x8w\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-kube-api-access-w7x8w\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.589980 5037 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.773252 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:22 crc kubenswrapper[5037]: I1126 14:24:22.773547 5037 status_manager.go:851] "Failed to get status for pod" podUID="fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-nwzvj\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:23 crc kubenswrapper[5037]: I1126 14:24:23.910735 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:23 crc kubenswrapper[5037]: I1126 14:24:23.910931 5037 status_manager.go:851] "Failed to get status for pod" podUID="fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-nwzvj\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:23 crc kubenswrapper[5037]: I1126 14:24:23.911977 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:23 crc kubenswrapper[5037]: I1126 14:24:23.912668 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:23 crc kubenswrapper[5037]: I1126 14:24:23.913260 5037 status_manager.go:851] "Failed to get status for pod" podUID="fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-nwzvj\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:23 crc kubenswrapper[5037]: I1126 14:24:23.931509 5037 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="821d0155-28e9-4160-8885-aa8cc1d60197" Nov 26 14:24:23 crc kubenswrapper[5037]: I1126 14:24:23.931555 5037 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="821d0155-28e9-4160-8885-aa8cc1d60197" Nov 26 14:24:23 crc kubenswrapper[5037]: E1126 14:24:23.932249 5037 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:23 crc kubenswrapper[5037]: I1126 14:24:23.932758 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.473257 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.473330 5037 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf" exitCode=1 Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.473385 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf"} Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.473859 5037 scope.go:117] "RemoveContainer" containerID="3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf" Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.474753 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.474928 5037 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.475280 5037 status_manager.go:851] "Failed to get status for pod" podUID="fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-nwzvj\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.477057 5037 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="16111ce3d9f744844db2a3bb8c2378b684fb8f51c7f0168f0790e31ceb98cdc2" exitCode=0 Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.477110 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"16111ce3d9f744844db2a3bb8c2378b684fb8f51c7f0168f0790e31ceb98cdc2"} Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.477177 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0970c84204bb9bd04522dcc23bb8f7184335cee4576c0e5c344f45919d91cb02"} Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.477540 5037 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="821d0155-28e9-4160-8885-aa8cc1d60197" Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.477567 5037 mirror_client.go:130] "Deleting a mirror pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="821d0155-28e9-4160-8885-aa8cc1d60197" Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.477811 5037 status_manager.go:851] "Failed to get status for pod" podUID="fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" pod="openshift-image-registry/image-registry-697d97f7c8-nwzvj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-nwzvj\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.478159 5037 status_manager.go:851] "Failed to get status for pod" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:24 crc kubenswrapper[5037]: E1126 14:24:24.478178 5037 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:24 crc kubenswrapper[5037]: I1126 14:24:24.478615 5037 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.69:6443: connect: connection refused" Nov 26 14:24:25 crc kubenswrapper[5037]: I1126 14:24:25.546968 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d5185fef18e75e8efe83c93997304f3c7aa565dedbfc21e59025866e3f7b7151"} Nov 26 14:24:25 crc kubenswrapper[5037]: I1126 14:24:25.547445 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"4ba0090299cb118d76da874e4ea6caca664ed3799b257b5142de8d402d3f015f"} Nov 26 14:24:25 crc kubenswrapper[5037]: I1126 14:24:25.547476 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:24:25 crc kubenswrapper[5037]: I1126 14:24:25.547504 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e90ef3d73e039ce29005633867ada95bc7d288cf245c94d4ef0bb71f99a1f566"} Nov 26 14:24:25 crc kubenswrapper[5037]: I1126 14:24:25.547519 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"20e4b7353bd9655fa7215e63029b3b7096c3af78c1eddb3310c43db4b17cace6"} Nov 26 14:24:25 crc kubenswrapper[5037]: I1126 14:24:25.552249 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 26 14:24:25 crc kubenswrapper[5037]: I1126 14:24:25.552337 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f9fb40442ba14b374379f24adac9b29483b27eaddbe3af4f14e9573c104ed7bb"} Nov 26 14:24:26 crc kubenswrapper[5037]: I1126 14:24:26.564814 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f2e974dadd3c13670fb300f82e37c0e12eaa4cdc9ac6e7b8a0c7722f9d2c95b6"} Nov 26 14:24:26 crc kubenswrapper[5037]: I1126 14:24:26.565243 5037 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="821d0155-28e9-4160-8885-aa8cc1d60197" Nov 26 14:24:26 crc kubenswrapper[5037]: I1126 14:24:26.565311 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:26 crc kubenswrapper[5037]: I1126 14:24:26.565317 5037 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="821d0155-28e9-4160-8885-aa8cc1d60197" Nov 26 14:24:28 crc kubenswrapper[5037]: I1126 14:24:28.933944 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:28 crc kubenswrapper[5037]: I1126 14:24:28.934473 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:28 crc kubenswrapper[5037]: I1126 14:24:28.942671 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:29 crc kubenswrapper[5037]: I1126 14:24:29.316320 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:24:31 crc kubenswrapper[5037]: I1126 14:24:31.575690 5037 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:31 crc kubenswrapper[5037]: I1126 14:24:31.599966 5037 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="821d0155-28e9-4160-8885-aa8cc1d60197" Nov 26 14:24:31 crc kubenswrapper[5037]: I1126 14:24:31.600026 5037 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="821d0155-28e9-4160-8885-aa8cc1d60197" Nov 26 14:24:31 crc kubenswrapper[5037]: I1126 14:24:31.604104 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:32 crc kubenswrapper[5037]: I1126 14:24:32.605786 5037 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="821d0155-28e9-4160-8885-aa8cc1d60197" Nov 26 14:24:32 crc kubenswrapper[5037]: I1126 14:24:32.606259 5037 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="821d0155-28e9-4160-8885-aa8cc1d60197" Nov 26 14:24:33 crc kubenswrapper[5037]: I1126 14:24:33.923694 5037 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="164bea80-cdd6-41f1-ab4a-039605cff21b" Nov 26 14:24:35 crc kubenswrapper[5037]: I1126 14:24:35.547545 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:24:35 crc kubenswrapper[5037]: I1126 14:24:35.547817 5037 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 26 14:24:35 crc kubenswrapper[5037]: I1126 14:24:35.548140 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 26 14:24:40 crc kubenswrapper[5037]: I1126 14:24:40.649394 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 26 14:24:41 crc kubenswrapper[5037]: I1126 14:24:41.247222 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:24:41 crc kubenswrapper[5037]: I1126 14:24:41.247318 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:24:42 crc kubenswrapper[5037]: I1126 14:24:42.912823 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 14:24:43 crc kubenswrapper[5037]: I1126 14:24:43.285654 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 14:24:43 crc kubenswrapper[5037]: I1126 14:24:43.447198 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 26 14:24:43 crc kubenswrapper[5037]: I1126 14:24:43.577022 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 26 14:24:43 crc kubenswrapper[5037]: I1126 14:24:43.667727 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 26 14:24:43 crc kubenswrapper[5037]: I1126 14:24:43.667881 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 26 14:24:43 crc kubenswrapper[5037]: I1126 14:24:43.993492 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 26 14:24:44 crc kubenswrapper[5037]: I1126 14:24:44.058422 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 26 14:24:44 crc kubenswrapper[5037]: I1126 14:24:44.214105 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 14:24:44 crc kubenswrapper[5037]: I1126 14:24:44.282247 5037 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 26 14:24:44 crc kubenswrapper[5037]: I1126 14:24:44.342997 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 26 14:24:44 crc kubenswrapper[5037]: I1126 14:24:44.671807 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 14:24:44 crc kubenswrapper[5037]: I1126 14:24:44.832709 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 26 14:24:44 crc kubenswrapper[5037]: I1126 14:24:44.979555 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.060230 5037 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.094106 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.108821 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.187075 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.216628 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.247933 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.345991 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.402393 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.547595 5037 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.547748 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.634005 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.729008 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 14:24:45 crc 
kubenswrapper[5037]: I1126 14:24:45.730549 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.760299 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.806954 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.888352 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 26 14:24:45 crc kubenswrapper[5037]: I1126 14:24:45.936513 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.012232 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.024809 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.178154 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.245169 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.287599 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.327023 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.365463 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.386555 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.388603 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.510176 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.562589 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.588712 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.612938 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.631559 5037 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.738387 5037 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.743630 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-nwzvj","openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.743694 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.747865 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.749062 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.774300 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=15.77425971 podStartE2EDuration="15.77425971s" podCreationTimestamp="2025-11-26 14:24:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:24:46.771160334 +0000 UTC m=+553.567930558" watchObservedRunningTime="2025-11-26 14:24:46.77425971 +0000 UTC m=+553.571029904" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.812239 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.855675 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.865966 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.876702 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.884857 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 26 14:24:46 crc kubenswrapper[5037]: I1126 14:24:46.989064 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.035374 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.147442 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.212255 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.356330 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.380235 5037 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.421447 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.428937 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.514484 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.729706 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.747116 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.769069 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.770579 5037 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.855412 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.884166 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.921998 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" path="/var/lib/kubelet/pods/fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb/volumes" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.950268 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 26 14:24:47 crc kubenswrapper[5037]: I1126 14:24:47.957138 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.056956 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.142727 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.166274 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.169440 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.209409 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.245899 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 26 
14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.254543 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.313806 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.375816 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.511949 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.596040 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.600042 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.607816 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.625908 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.684887 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.727604 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.727877 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.756977 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.788509 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.854967 5037 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.872674 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 26 14:24:48 crc kubenswrapper[5037]: I1126 14:24:48.892790 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.057419 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.138969 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.171023 5037 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.321591 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.365149 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.378121 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.394369 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.399452 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.462119 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.598702 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.634753 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.647127 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.713165 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.751428 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.767361 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.808435 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.843027 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.871061 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.871477 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.874830 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.908026 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.934638 5037 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.946816 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 26 14:24:49 crc kubenswrapper[5037]: I1126 14:24:49.994027 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 26 14:24:50 crc kubenswrapper[5037]: I1126 14:24:50.042588 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 26 14:24:50 crc kubenswrapper[5037]: I1126 14:24:50.193511 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 26 14:24:50 crc kubenswrapper[5037]: I1126 14:24:50.203436 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 14:24:50 crc kubenswrapper[5037]: I1126 14:24:50.344700 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 26 14:24:50 crc kubenswrapper[5037]: I1126 14:24:50.446919 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 26 14:24:50 crc kubenswrapper[5037]: I1126 14:24:50.475767 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 26 14:24:50 crc kubenswrapper[5037]: I1126 14:24:50.477721 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 26 14:24:50 crc kubenswrapper[5037]: I1126 14:24:50.538496 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 26 14:24:50 crc kubenswrapper[5037]: I1126 14:24:50.614551 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 26 14:24:50 crc kubenswrapper[5037]: I1126 14:24:50.622883 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 26 14:24:50 crc kubenswrapper[5037]: I1126 14:24:50.838132 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 26 14:24:50 crc kubenswrapper[5037]: I1126 14:24:50.842438 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 26 14:24:50 crc kubenswrapper[5037]: I1126 14:24:50.864490 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.049389 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.114837 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.121064 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.171022 5037 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress-operator"/"trusted-ca" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.230702 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.259243 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.300010 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.329938 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.342086 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.380457 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.468971 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.486806 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.521704 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.552544 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.585662 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.619277 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.727585 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.825737 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.901253 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.944589 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 26 14:24:51 crc kubenswrapper[5037]: I1126 14:24:51.945750 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.041847 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.295154 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" 
Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.332649 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.384310 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.387407 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.406943 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.509614 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.601944 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.622607 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.636831 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.673745 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.785255 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.842762 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.846419 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.890794 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.904009 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 26 14:24:52 crc kubenswrapper[5037]: I1126 14:24:52.912760 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.108413 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.319253 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.404947 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.404975 5037 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.437794 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.455338 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.471263 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.920905 5037 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.921259 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://eb8d9e8fc948bdfa392c4f090012c1962aaad4ff957160f49ebc9d923801c698" gracePeriod=5 Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.929446 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.940329 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.945370 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.976481 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 26 14:24:53 crc kubenswrapper[5037]: I1126 14:24:53.985802 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.039799 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.042743 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.086732 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.135037 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.163394 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.214447 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.278271 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 26 14:24:54 crc 
kubenswrapper[5037]: I1126 14:24:54.279754 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.281338 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.288829 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.296626 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.325558 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.364966 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.445552 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.474933 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.484769 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.611639 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.622394 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.648516 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.671842 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.781346 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.789859 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.866370 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 26 14:24:54 crc kubenswrapper[5037]: I1126 14:24:54.960235 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.005844 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.030256 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 
14:24:55.081399 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.104865 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.257231 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.306556 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.315878 5037 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.409014 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wb4bw"] Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.409730 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wb4bw" podUID="bf9611ba-47f1-43da-92fc-a4f99606500a" containerName="registry-server" containerID="cri-o://9e6a7138f86d07f6ec2848f6a1aa2aa1dcb7cb0e37fa067e77f20d96e0f16a19" gracePeriod=30 Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.418548 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-m8gz7"] Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.418898 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-m8gz7" podUID="f379a727-1bc7-469d-8148-b7fb1abb5155" containerName="registry-server" containerID="cri-o://25ac19827c3815eae23d456da47c5d846d63b191e05808a65c074afa3f4984b0" gracePeriod=30 Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.429791 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.437537 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-txjgw"] Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.437754 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" podUID="8b2f4cca-09b7-44dc-9458-298b0e3c8507" containerName="marketplace-operator" containerID="cri-o://2fff1264478bbaef430520300794f4610677c34f3c9285de0cd18e71b13117b2" gracePeriod=30 Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.456839 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-msn7s"] Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.457156 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-msn7s" podUID="e33b5e20-dd02-4850-b59b-40a271de1b3f" containerName="registry-server" containerID="cri-o://1af61d3729cdc507cd1d56e096b3b50561f5ebbeefa85d78043fda571e830c47" gracePeriod=30 Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.461787 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b5sj7"] Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.462312 5037 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/redhat-operators-b5sj7" podUID="570f926c-8f52-4b77-a139-bfa9d3b61071" containerName="registry-server" containerID="cri-o://b8db39bb5ad4b54572b8df0d1e0361e5c360a34a7b38e8aafcd252f318944683" gracePeriod=30 Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.473658 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.548120 5037 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.548265 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.548470 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.550669 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"f9fb40442ba14b374379f24adac9b29483b27eaddbe3af4f14e9573c104ed7bb"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.550940 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://f9fb40442ba14b374379f24adac9b29483b27eaddbe3af4f14e9573c104ed7bb" gracePeriod=30 Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.556480 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.577131 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.661277 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.714241 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.751109 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.782350 5037 generic.go:334] "Generic (PLEG): container finished" podID="bf9611ba-47f1-43da-92fc-a4f99606500a" containerID="9e6a7138f86d07f6ec2848f6a1aa2aa1dcb7cb0e37fa067e77f20d96e0f16a19" exitCode=0 Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.782465 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-wb4bw" event={"ID":"bf9611ba-47f1-43da-92fc-a4f99606500a","Type":"ContainerDied","Data":"9e6a7138f86d07f6ec2848f6a1aa2aa1dcb7cb0e37fa067e77f20d96e0f16a19"} Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.785352 5037 generic.go:334] "Generic (PLEG): container finished" podID="f379a727-1bc7-469d-8148-b7fb1abb5155" containerID="25ac19827c3815eae23d456da47c5d846d63b191e05808a65c074afa3f4984b0" exitCode=0 Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.785436 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m8gz7" event={"ID":"f379a727-1bc7-469d-8148-b7fb1abb5155","Type":"ContainerDied","Data":"25ac19827c3815eae23d456da47c5d846d63b191e05808a65c074afa3f4984b0"} Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.785460 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m8gz7" event={"ID":"f379a727-1bc7-469d-8148-b7fb1abb5155","Type":"ContainerDied","Data":"7af3924c559a9f71ac43053c217ae871caef7850c211e83d9b23e7060d147b84"} Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.785480 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7af3924c559a9f71ac43053c217ae871caef7850c211e83d9b23e7060d147b84" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.791182 5037 generic.go:334] "Generic (PLEG): container finished" podID="570f926c-8f52-4b77-a139-bfa9d3b61071" containerID="b8db39bb5ad4b54572b8df0d1e0361e5c360a34a7b38e8aafcd252f318944683" exitCode=0 Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.791273 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5sj7" event={"ID":"570f926c-8f52-4b77-a139-bfa9d3b61071","Type":"ContainerDied","Data":"b8db39bb5ad4b54572b8df0d1e0361e5c360a34a7b38e8aafcd252f318944683"} Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.794167 5037 generic.go:334] "Generic (PLEG): container finished" podID="8b2f4cca-09b7-44dc-9458-298b0e3c8507" containerID="2fff1264478bbaef430520300794f4610677c34f3c9285de0cd18e71b13117b2" exitCode=0 Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.794209 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" event={"ID":"8b2f4cca-09b7-44dc-9458-298b0e3c8507","Type":"ContainerDied","Data":"2fff1264478bbaef430520300794f4610677c34f3c9285de0cd18e71b13117b2"} Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.796130 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-m8gz7" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.797152 5037 generic.go:334] "Generic (PLEG): container finished" podID="e33b5e20-dd02-4850-b59b-40a271de1b3f" containerID="1af61d3729cdc507cd1d56e096b3b50561f5ebbeefa85d78043fda571e830c47" exitCode=0 Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.797181 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msn7s" event={"ID":"e33b5e20-dd02-4850-b59b-40a271de1b3f","Type":"ContainerDied","Data":"1af61d3729cdc507cd1d56e096b3b50561f5ebbeefa85d78043fda571e830c47"} Nov 26 14:24:55 crc kubenswrapper[5037]: E1126 14:24:55.838588 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b8db39bb5ad4b54572b8df0d1e0361e5c360a34a7b38e8aafcd252f318944683 is running failed: container process not found" containerID="b8db39bb5ad4b54572b8df0d1e0361e5c360a34a7b38e8aafcd252f318944683" cmd=["grpc_health_probe","-addr=:50051"] Nov 26 14:24:55 crc kubenswrapper[5037]: E1126 14:24:55.840030 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b8db39bb5ad4b54572b8df0d1e0361e5c360a34a7b38e8aafcd252f318944683 is running failed: container process not found" containerID="b8db39bb5ad4b54572b8df0d1e0361e5c360a34a7b38e8aafcd252f318944683" cmd=["grpc_health_probe","-addr=:50051"] Nov 26 14:24:55 crc kubenswrapper[5037]: E1126 14:24:55.840685 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b8db39bb5ad4b54572b8df0d1e0361e5c360a34a7b38e8aafcd252f318944683 is running failed: container process not found" containerID="b8db39bb5ad4b54572b8df0d1e0361e5c360a34a7b38e8aafcd252f318944683" cmd=["grpc_health_probe","-addr=:50051"] Nov 26 14:24:55 crc kubenswrapper[5037]: E1126 14:24:55.840790 5037 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b8db39bb5ad4b54572b8df0d1e0361e5c360a34a7b38e8aafcd252f318944683 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-b5sj7" podUID="570f926c-8f52-4b77-a139-bfa9d3b61071" containerName="registry-server" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.862313 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.870588 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wb4bw" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.931485 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-msn7s" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.936415 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5sj7" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.941276 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.948684 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.982736 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f379a727-1bc7-469d-8148-b7fb1abb5155-catalog-content\") pod \"f379a727-1bc7-469d-8148-b7fb1abb5155\" (UID: \"f379a727-1bc7-469d-8148-b7fb1abb5155\") " Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.982805 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9611ba-47f1-43da-92fc-a4f99606500a-catalog-content\") pod \"bf9611ba-47f1-43da-92fc-a4f99606500a\" (UID: \"bf9611ba-47f1-43da-92fc-a4f99606500a\") " Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.982869 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmgjr\" (UniqueName: \"kubernetes.io/projected/f379a727-1bc7-469d-8148-b7fb1abb5155-kube-api-access-nmgjr\") pod \"f379a727-1bc7-469d-8148-b7fb1abb5155\" (UID: \"f379a727-1bc7-469d-8148-b7fb1abb5155\") " Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.982911 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9611ba-47f1-43da-92fc-a4f99606500a-utilities\") pod \"bf9611ba-47f1-43da-92fc-a4f99606500a\" (UID: \"bf9611ba-47f1-43da-92fc-a4f99606500a\") " Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.982964 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4fmh\" (UniqueName: \"kubernetes.io/projected/bf9611ba-47f1-43da-92fc-a4f99606500a-kube-api-access-m4fmh\") pod \"bf9611ba-47f1-43da-92fc-a4f99606500a\" (UID: \"bf9611ba-47f1-43da-92fc-a4f99606500a\") " Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.983016 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f379a727-1bc7-469d-8148-b7fb1abb5155-utilities\") pod \"f379a727-1bc7-469d-8148-b7fb1abb5155\" (UID: \"f379a727-1bc7-469d-8148-b7fb1abb5155\") " Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.984814 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f379a727-1bc7-469d-8148-b7fb1abb5155-utilities" (OuterVolumeSpecName: "utilities") pod "f379a727-1bc7-469d-8148-b7fb1abb5155" (UID: "f379a727-1bc7-469d-8148-b7fb1abb5155"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.987806 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf9611ba-47f1-43da-92fc-a4f99606500a-utilities" (OuterVolumeSpecName: "utilities") pod "bf9611ba-47f1-43da-92fc-a4f99606500a" (UID: "bf9611ba-47f1-43da-92fc-a4f99606500a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.990536 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f379a727-1bc7-469d-8148-b7fb1abb5155-kube-api-access-nmgjr" (OuterVolumeSpecName: "kube-api-access-nmgjr") pod "f379a727-1bc7-469d-8148-b7fb1abb5155" (UID: "f379a727-1bc7-469d-8148-b7fb1abb5155"). InnerVolumeSpecName "kube-api-access-nmgjr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:24:55 crc kubenswrapper[5037]: I1126 14:24:55.990774 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf9611ba-47f1-43da-92fc-a4f99606500a-kube-api-access-m4fmh" (OuterVolumeSpecName: "kube-api-access-m4fmh") pod "bf9611ba-47f1-43da-92fc-a4f99606500a" (UID: "bf9611ba-47f1-43da-92fc-a4f99606500a"). InnerVolumeSpecName "kube-api-access-m4fmh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.033357 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.048573 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf9611ba-47f1-43da-92fc-a4f99606500a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bf9611ba-47f1-43da-92fc-a4f99606500a" (UID: "bf9611ba-47f1-43da-92fc-a4f99606500a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.084093 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8b2f4cca-09b7-44dc-9458-298b0e3c8507-marketplace-operator-metrics\") pod \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\" (UID: \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\") " Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.084206 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8b2f4cca-09b7-44dc-9458-298b0e3c8507-marketplace-trusted-ca\") pod \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\" (UID: \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\") " Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.084258 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47ck9\" (UniqueName: \"kubernetes.io/projected/8b2f4cca-09b7-44dc-9458-298b0e3c8507-kube-api-access-47ck9\") pod \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\" (UID: \"8b2f4cca-09b7-44dc-9458-298b0e3c8507\") " Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.084356 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e33b5e20-dd02-4850-b59b-40a271de1b3f-utilities\") pod \"e33b5e20-dd02-4850-b59b-40a271de1b3f\" (UID: \"e33b5e20-dd02-4850-b59b-40a271de1b3f\") " Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.084400 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8qwc\" (UniqueName: \"kubernetes.io/projected/570f926c-8f52-4b77-a139-bfa9d3b61071-kube-api-access-s8qwc\") pod \"570f926c-8f52-4b77-a139-bfa9d3b61071\" (UID: \"570f926c-8f52-4b77-a139-bfa9d3b61071\") " Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.084457 5037 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e33b5e20-dd02-4850-b59b-40a271de1b3f-catalog-content\") pod \"e33b5e20-dd02-4850-b59b-40a271de1b3f\" (UID: \"e33b5e20-dd02-4850-b59b-40a271de1b3f\") " Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.084860 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/570f926c-8f52-4b77-a139-bfa9d3b61071-catalog-content\") pod \"570f926c-8f52-4b77-a139-bfa9d3b61071\" (UID: \"570f926c-8f52-4b77-a139-bfa9d3b61071\") " Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.084963 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/570f926c-8f52-4b77-a139-bfa9d3b61071-utilities\") pod \"570f926c-8f52-4b77-a139-bfa9d3b61071\" (UID: \"570f926c-8f52-4b77-a139-bfa9d3b61071\") " Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.085023 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxksz\" (UniqueName: \"kubernetes.io/projected/e33b5e20-dd02-4850-b59b-40a271de1b3f-kube-api-access-jxksz\") pod \"e33b5e20-dd02-4850-b59b-40a271de1b3f\" (UID: \"e33b5e20-dd02-4850-b59b-40a271de1b3f\") " Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.085233 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b2f4cca-09b7-44dc-9458-298b0e3c8507-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "8b2f4cca-09b7-44dc-9458-298b0e3c8507" (UID: "8b2f4cca-09b7-44dc-9458-298b0e3c8507"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.085402 5037 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8b2f4cca-09b7-44dc-9458-298b0e3c8507-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.085574 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9611ba-47f1-43da-92fc-a4f99606500a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.085599 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmgjr\" (UniqueName: \"kubernetes.io/projected/f379a727-1bc7-469d-8148-b7fb1abb5155-kube-api-access-nmgjr\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.085617 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9611ba-47f1-43da-92fc-a4f99606500a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.085633 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4fmh\" (UniqueName: \"kubernetes.io/projected/bf9611ba-47f1-43da-92fc-a4f99606500a-kube-api-access-m4fmh\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.085649 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f379a727-1bc7-469d-8148-b7fb1abb5155-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.085717 5037 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/empty-dir/e33b5e20-dd02-4850-b59b-40a271de1b3f-utilities" (OuterVolumeSpecName: "utilities") pod "e33b5e20-dd02-4850-b59b-40a271de1b3f" (UID: "e33b5e20-dd02-4850-b59b-40a271de1b3f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.085999 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/570f926c-8f52-4b77-a139-bfa9d3b61071-utilities" (OuterVolumeSpecName: "utilities") pod "570f926c-8f52-4b77-a139-bfa9d3b61071" (UID: "570f926c-8f52-4b77-a139-bfa9d3b61071"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.086512 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f379a727-1bc7-469d-8148-b7fb1abb5155-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f379a727-1bc7-469d-8148-b7fb1abb5155" (UID: "f379a727-1bc7-469d-8148-b7fb1abb5155"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.089816 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b2f4cca-09b7-44dc-9458-298b0e3c8507-kube-api-access-47ck9" (OuterVolumeSpecName: "kube-api-access-47ck9") pod "8b2f4cca-09b7-44dc-9458-298b0e3c8507" (UID: "8b2f4cca-09b7-44dc-9458-298b0e3c8507"). InnerVolumeSpecName "kube-api-access-47ck9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.089961 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e33b5e20-dd02-4850-b59b-40a271de1b3f-kube-api-access-jxksz" (OuterVolumeSpecName: "kube-api-access-jxksz") pod "e33b5e20-dd02-4850-b59b-40a271de1b3f" (UID: "e33b5e20-dd02-4850-b59b-40a271de1b3f"). InnerVolumeSpecName "kube-api-access-jxksz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.105497 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b2f4cca-09b7-44dc-9458-298b0e3c8507-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "8b2f4cca-09b7-44dc-9458-298b0e3c8507" (UID: "8b2f4cca-09b7-44dc-9458-298b0e3c8507"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.106725 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/570f926c-8f52-4b77-a139-bfa9d3b61071-kube-api-access-s8qwc" (OuterVolumeSpecName: "kube-api-access-s8qwc") pod "570f926c-8f52-4b77-a139-bfa9d3b61071" (UID: "570f926c-8f52-4b77-a139-bfa9d3b61071"). InnerVolumeSpecName "kube-api-access-s8qwc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.109944 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e33b5e20-dd02-4850-b59b-40a271de1b3f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e33b5e20-dd02-4850-b59b-40a271de1b3f" (UID: "e33b5e20-dd02-4850-b59b-40a271de1b3f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.187078 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/570f926c-8f52-4b77-a139-bfa9d3b61071-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.187131 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxksz\" (UniqueName: \"kubernetes.io/projected/e33b5e20-dd02-4850-b59b-40a271de1b3f-kube-api-access-jxksz\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.187150 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f379a727-1bc7-469d-8148-b7fb1abb5155-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.187162 5037 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8b2f4cca-09b7-44dc-9458-298b0e3c8507-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.187177 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47ck9\" (UniqueName: \"kubernetes.io/projected/8b2f4cca-09b7-44dc-9458-298b0e3c8507-kube-api-access-47ck9\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.187189 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e33b5e20-dd02-4850-b59b-40a271de1b3f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.187200 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8qwc\" (UniqueName: \"kubernetes.io/projected/570f926c-8f52-4b77-a139-bfa9d3b61071-kube-api-access-s8qwc\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.187211 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e33b5e20-dd02-4850-b59b-40a271de1b3f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.196762 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.197179 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.202099 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/570f926c-8f52-4b77-a139-bfa9d3b61071-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "570f926c-8f52-4b77-a139-bfa9d3b61071" (UID: "570f926c-8f52-4b77-a139-bfa9d3b61071"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.262793 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.289113 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/570f926c-8f52-4b77-a139-bfa9d3b61071-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.389593 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.423706 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.552144 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.593444 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.596600 5037 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.733506 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.738488 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.804211 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5sj7" event={"ID":"570f926c-8f52-4b77-a139-bfa9d3b61071","Type":"ContainerDied","Data":"3c5a6c768ffc48858417cffe4d1be10bc164e0481ebac1ff633ca442a058c50a"} Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.804275 5037 scope.go:117] "RemoveContainer" containerID="b8db39bb5ad4b54572b8df0d1e0361e5c360a34a7b38e8aafcd252f318944683" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.804453 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5sj7" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.814934 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.815365 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" event={"ID":"8b2f4cca-09b7-44dc-9458-298b0e3c8507","Type":"ContainerDied","Data":"6d1268fce4fa2cfdc7a92aa145a796669d80e80eefa8d6048c7b9e1ae4c2ebf5"} Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.816078 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-txjgw" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.818494 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-msn7s" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.818500 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msn7s" event={"ID":"e33b5e20-dd02-4850-b59b-40a271de1b3f","Type":"ContainerDied","Data":"d7afab889d9a341f2b766b14f2ef82f0ef1b041be7aa8674e9e091f3a86195d3"} Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.821224 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m8gz7" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.822866 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wb4bw" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.822899 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wb4bw" event={"ID":"bf9611ba-47f1-43da-92fc-a4f99606500a","Type":"ContainerDied","Data":"628d5830c87c62fc4bd330045026bdc2a2a64c026696f3ef8d264e175faf0f5a"} Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.836316 5037 scope.go:117] "RemoveContainer" containerID="8275addaa95be45de966e97d7fe694e8548a52e013963a612ac6ce2643360927" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.856651 5037 scope.go:117] "RemoveContainer" containerID="ef443edbc36ea241d41854ffc60714eddd7af29908d820af53dd96c0a93f1040" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.866444 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b5sj7"] Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.873384 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-b5sj7"] Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.893022 5037 scope.go:117] "RemoveContainer" containerID="2fff1264478bbaef430520300794f4610677c34f3c9285de0cd18e71b13117b2" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.893356 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wb4bw"] Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.897621 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wb4bw"] Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.901963 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.919691 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-txjgw"] Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.927449 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-txjgw"] Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.931682 5037 scope.go:117] "RemoveContainer" containerID="1af61d3729cdc507cd1d56e096b3b50561f5ebbeefa85d78043fda571e830c47" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.935156 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-msn7s"] Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.942493 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-msn7s"] Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.947150 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/community-operators-m8gz7"] Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.950244 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-m8gz7"] Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.957467 5037 scope.go:117] "RemoveContainer" containerID="b48c5942d91a4b3d557b6fed32103f26b65b4fccbbc64287c524d93c06a0182f" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.971618 5037 scope.go:117] "RemoveContainer" containerID="4e28fcc51c73fd80e4d786e7b960aaa63f4745e8ca7d491f35092746d37a238e" Nov 26 14:24:56 crc kubenswrapper[5037]: I1126 14:24:56.990361 5037 scope.go:117] "RemoveContainer" containerID="9e6a7138f86d07f6ec2848f6a1aa2aa1dcb7cb0e37fa067e77f20d96e0f16a19" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.009091 5037 scope.go:117] "RemoveContainer" containerID="2ee127915b93a0f0611a2bc7f09947588c410281c1834e927ec7155c99d46fbe" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.032222 5037 scope.go:117] "RemoveContainer" containerID="2c488f9ae65e5c372a57d9f3c5e80d614a49e0fedfa12815b5ba8daebd325eea" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.121217 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.183981 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.352069 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.358984 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.366547 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.382521 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.685063 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.690356 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.914119 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="570f926c-8f52-4b77-a139-bfa9d3b61071" path="/var/lib/kubelet/pods/570f926c-8f52-4b77-a139-bfa9d3b61071/volumes" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.915058 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b2f4cca-09b7-44dc-9458-298b0e3c8507" path="/var/lib/kubelet/pods/8b2f4cca-09b7-44dc-9458-298b0e3c8507/volumes" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.915577 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf9611ba-47f1-43da-92fc-a4f99606500a" path="/var/lib/kubelet/pods/bf9611ba-47f1-43da-92fc-a4f99606500a/volumes" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.917361 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e33b5e20-dd02-4850-b59b-40a271de1b3f" 
path="/var/lib/kubelet/pods/e33b5e20-dd02-4850-b59b-40a271de1b3f/volumes" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.917960 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f379a727-1bc7-469d-8148-b7fb1abb5155" path="/var/lib/kubelet/pods/f379a727-1bc7-469d-8148-b7fb1abb5155/volumes" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.942459 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 14:24:57 crc kubenswrapper[5037]: I1126 14:24:57.944761 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 26 14:24:58 crc kubenswrapper[5037]: I1126 14:24:58.027796 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 26 14:24:58 crc kubenswrapper[5037]: I1126 14:24:58.030059 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 26 14:24:58 crc kubenswrapper[5037]: I1126 14:24:58.344043 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 26 14:24:58 crc kubenswrapper[5037]: I1126 14:24:58.551833 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 26 14:24:58 crc kubenswrapper[5037]: I1126 14:24:58.631229 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.133429 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.372560 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.496024 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.496436 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.639722 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.639797 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.639962 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.639997 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.640065 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.640108 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.640137 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.640253 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.640355 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.640823 5037 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.640882 5037 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.640899 5037 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.640944 5037 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.648768 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.742012 5037 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.846354 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.846407 5037 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="eb8d9e8fc948bdfa392c4f090012c1962aaad4ff957160f49ebc9d923801c698" exitCode=137 Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.846459 5037 scope.go:117] "RemoveContainer" containerID="eb8d9e8fc948bdfa392c4f090012c1962aaad4ff957160f49ebc9d923801c698" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.846492 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.857504 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.871449 5037 scope.go:117] "RemoveContainer" containerID="eb8d9e8fc948bdfa392c4f090012c1962aaad4ff957160f49ebc9d923801c698" Nov 26 14:24:59 crc kubenswrapper[5037]: E1126 14:24:59.871965 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb8d9e8fc948bdfa392c4f090012c1962aaad4ff957160f49ebc9d923801c698\": container with ID starting with eb8d9e8fc948bdfa392c4f090012c1962aaad4ff957160f49ebc9d923801c698 not found: ID does not exist" containerID="eb8d9e8fc948bdfa392c4f090012c1962aaad4ff957160f49ebc9d923801c698" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.871999 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb8d9e8fc948bdfa392c4f090012c1962aaad4ff957160f49ebc9d923801c698"} err="failed to get container status \"eb8d9e8fc948bdfa392c4f090012c1962aaad4ff957160f49ebc9d923801c698\": rpc error: code = NotFound desc = could not find container \"eb8d9e8fc948bdfa392c4f090012c1962aaad4ff957160f49ebc9d923801c698\": container with ID starting with eb8d9e8fc948bdfa392c4f090012c1962aaad4ff957160f49ebc9d923801c698 not found: ID does not exist" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.883763 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.918199 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 26 14:24:59 crc kubenswrapper[5037]: I1126 14:24:59.920382 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 26 14:25:00 crc kubenswrapper[5037]: I1126 14:25:00.878782 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 14:25:01 crc kubenswrapper[5037]: I1126 14:25:01.473398 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 26 14:25:11 crc kubenswrapper[5037]: I1126 14:25:11.247369 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:25:11 crc kubenswrapper[5037]: I1126 14:25:11.248212 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:25:11 crc kubenswrapper[5037]: I1126 14:25:11.248308 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:25:11 crc kubenswrapper[5037]: I1126 14:25:11.249169 5037 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b7afa716ab555c514aa4b783f55103f0b795f534b642704349668ad1f4f2718c"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 14:25:11 crc kubenswrapper[5037]: I1126 14:25:11.249254 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://b7afa716ab555c514aa4b783f55103f0b795f534b642704349668ad1f4f2718c" gracePeriod=600 Nov 26 14:25:11 crc kubenswrapper[5037]: I1126 14:25:11.917730 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="b7afa716ab555c514aa4b783f55103f0b795f534b642704349668ad1f4f2718c" exitCode=0 Nov 26 14:25:11 crc kubenswrapper[5037]: I1126 14:25:11.918600 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"b7afa716ab555c514aa4b783f55103f0b795f534b642704349668ad1f4f2718c"} Nov 26 14:25:11 crc kubenswrapper[5037]: I1126 14:25:11.918642 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"349f155942970ffc9212c9698596c47d1a0002439affbcd295ff8f4a649f33b4"} Nov 26 14:25:11 crc kubenswrapper[5037]: I1126 14:25:11.918666 5037 scope.go:117] "RemoveContainer" containerID="0ca155f1028f7449a83057d2b19d8707af01de25fbb8c44d82c60ea823dd3d64" Nov 26 14:25:26 crc kubenswrapper[5037]: I1126 14:25:26.016410 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 26 14:25:26 crc kubenswrapper[5037]: I1126 14:25:26.018629 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 26 14:25:26 crc kubenswrapper[5037]: I1126 14:25:26.018676 5037 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="f9fb40442ba14b374379f24adac9b29483b27eaddbe3af4f14e9573c104ed7bb" exitCode=137 Nov 26 14:25:26 crc kubenswrapper[5037]: I1126 14:25:26.018720 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"f9fb40442ba14b374379f24adac9b29483b27eaddbe3af4f14e9573c104ed7bb"} Nov 26 14:25:26 crc kubenswrapper[5037]: I1126 14:25:26.018775 5037 scope.go:117] "RemoveContainer" containerID="3301668cbbfd54f18852c869e3b3243148d59dc93434322a99b7092bf85d30bf" Nov 26 14:25:27 crc kubenswrapper[5037]: I1126 14:25:27.048683 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 26 14:25:27 crc kubenswrapper[5037]: I1126 14:25:27.050828 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"cd3a7216ccffe7e108a2dd165f5d1b9859a22f37fecf077ea1c09736663d5c60"} Nov 26 14:25:29 crc kubenswrapper[5037]: I1126 14:25:29.315913 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:25:34 crc kubenswrapper[5037]: I1126 14:25:34.473118 5037 scope.go:117] "RemoveContainer" containerID="25ac19827c3815eae23d456da47c5d846d63b191e05808a65c074afa3f4984b0" Nov 26 14:25:34 crc kubenswrapper[5037]: I1126 14:25:34.496423 5037 scope.go:117] "RemoveContainer" containerID="8c444b1d0fc3bc572c83aa6b76e8d19b5f1c58d235ef6db0662ae427b82707c7" Nov 26 14:25:34 crc kubenswrapper[5037]: I1126 14:25:34.520396 5037 scope.go:117] "RemoveContainer" containerID="b3d8f520dd0136bed9b91717b97b86d5a4876731097abf9bd309601c7677a6d2" Nov 26 14:25:35 crc kubenswrapper[5037]: I1126 14:25:35.818411 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:25:35 crc kubenswrapper[5037]: I1126 14:25:35.840358 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:25:36 crc kubenswrapper[5037]: I1126 14:25:36.836202 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717001 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xf9zc"] Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717647 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e33b5e20-dd02-4850-b59b-40a271de1b3f" containerName="registry-server" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717665 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e33b5e20-dd02-4850-b59b-40a271de1b3f" containerName="registry-server" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717681 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e33b5e20-dd02-4850-b59b-40a271de1b3f" containerName="extract-content" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717689 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e33b5e20-dd02-4850-b59b-40a271de1b3f" containerName="extract-content" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717701 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f379a727-1bc7-469d-8148-b7fb1abb5155" containerName="registry-server" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717711 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f379a727-1bc7-469d-8148-b7fb1abb5155" containerName="registry-server" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717722 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" containerName="registry" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717731 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" containerName="registry" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717742 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b2f4cca-09b7-44dc-9458-298b0e3c8507" containerName="marketplace-operator" Nov 26 14:25:45 crc 
kubenswrapper[5037]: I1126 14:25:45.717750 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b2f4cca-09b7-44dc-9458-298b0e3c8507" containerName="marketplace-operator" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717759 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="570f926c-8f52-4b77-a139-bfa9d3b61071" containerName="registry-server" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717766 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="570f926c-8f52-4b77-a139-bfa9d3b61071" containerName="registry-server" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717776 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f379a727-1bc7-469d-8148-b7fb1abb5155" containerName="extract-content" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717783 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f379a727-1bc7-469d-8148-b7fb1abb5155" containerName="extract-content" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717794 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f379a727-1bc7-469d-8148-b7fb1abb5155" containerName="extract-utilities" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717801 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f379a727-1bc7-469d-8148-b7fb1abb5155" containerName="extract-utilities" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717811 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf9611ba-47f1-43da-92fc-a4f99606500a" containerName="extract-utilities" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717818 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf9611ba-47f1-43da-92fc-a4f99606500a" containerName="extract-utilities" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717827 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf9611ba-47f1-43da-92fc-a4f99606500a" containerName="registry-server" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717834 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf9611ba-47f1-43da-92fc-a4f99606500a" containerName="registry-server" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717846 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="570f926c-8f52-4b77-a139-bfa9d3b61071" containerName="extract-utilities" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717855 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="570f926c-8f52-4b77-a139-bfa9d3b61071" containerName="extract-utilities" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717866 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf9611ba-47f1-43da-92fc-a4f99606500a" containerName="extract-content" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717874 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf9611ba-47f1-43da-92fc-a4f99606500a" containerName="extract-content" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717886 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="570f926c-8f52-4b77-a139-bfa9d3b61071" containerName="extract-content" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717893 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="570f926c-8f52-4b77-a139-bfa9d3b61071" containerName="extract-content" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717905 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e33b5e20-dd02-4850-b59b-40a271de1b3f" 
containerName="extract-utilities" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717913 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e33b5e20-dd02-4850-b59b-40a271de1b3f" containerName="extract-utilities" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717920 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" containerName="installer" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717927 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" containerName="installer" Nov 26 14:25:45 crc kubenswrapper[5037]: E1126 14:25:45.717937 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.717946 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.718054 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="287ee161-b8a6-4dbf-b2e7-d32380e75f47" containerName="installer" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.718067 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="e33b5e20-dd02-4850-b59b-40a271de1b3f" containerName="registry-server" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.718078 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="570f926c-8f52-4b77-a139-bfa9d3b61071" containerName="registry-server" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.718085 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.718098 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b2f4cca-09b7-44dc-9458-298b0e3c8507" containerName="marketplace-operator" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.718107 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="f379a727-1bc7-469d-8148-b7fb1abb5155" containerName="registry-server" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.718114 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf9611ba-47f1-43da-92fc-a4f99606500a" containerName="registry-server" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.718124 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd96f84a-d233-4a35-9c2e-5b7f3c8c8fbb" containerName="registry" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.718684 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.720685 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.721047 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.721769 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.722554 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.727020 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sk94z"] Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.727210 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" podUID="ce2d6221-7202-44cf-a85e-dec10e764129" containerName="controller-manager" containerID="cri-o://f779eddf583ad6d58305391dcd83ea594be424fd15fcb3078262ca8f6dddd585" gracePeriod=30 Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.729644 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27"] Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.730097 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" podUID="290387b2-4285-4359-bfdc-f89128f0c0a2" containerName="route-controller-manager" containerID="cri-o://a9deed0dd837df59f7ab69ece1f66419f5f59a7db6659e9220b571a9e144963b" gracePeriod=30 Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.736574 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.746524 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xf9zc"] Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.757630 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/04c1788d-27d1-4615-9147-c4dc6fad86bb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-xf9zc\" (UID: \"04c1788d-27d1-4615-9147-c4dc6fad86bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.757701 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/04c1788d-27d1-4615-9147-c4dc6fad86bb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-xf9zc\" (UID: \"04c1788d-27d1-4615-9147-c4dc6fad86bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.757775 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqjbj\" (UniqueName: 
\"kubernetes.io/projected/04c1788d-27d1-4615-9147-c4dc6fad86bb-kube-api-access-dqjbj\") pod \"marketplace-operator-79b997595-xf9zc\" (UID: \"04c1788d-27d1-4615-9147-c4dc6fad86bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.850764 5037 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-glk27 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.850865 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" podUID="290387b2-4285-4359-bfdc-f89128f0c0a2" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.859307 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqjbj\" (UniqueName: \"kubernetes.io/projected/04c1788d-27d1-4615-9147-c4dc6fad86bb-kube-api-access-dqjbj\") pod \"marketplace-operator-79b997595-xf9zc\" (UID: \"04c1788d-27d1-4615-9147-c4dc6fad86bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.859399 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/04c1788d-27d1-4615-9147-c4dc6fad86bb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-xf9zc\" (UID: \"04c1788d-27d1-4615-9147-c4dc6fad86bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.859435 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/04c1788d-27d1-4615-9147-c4dc6fad86bb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-xf9zc\" (UID: \"04c1788d-27d1-4615-9147-c4dc6fad86bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.862585 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/04c1788d-27d1-4615-9147-c4dc6fad86bb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-xf9zc\" (UID: \"04c1788d-27d1-4615-9147-c4dc6fad86bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.868392 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/04c1788d-27d1-4615-9147-c4dc6fad86bb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-xf9zc\" (UID: \"04c1788d-27d1-4615-9147-c4dc6fad86bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.881488 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqjbj\" (UniqueName: \"kubernetes.io/projected/04c1788d-27d1-4615-9147-c4dc6fad86bb-kube-api-access-dqjbj\") pod \"marketplace-operator-79b997595-xf9zc\" (UID: 
\"04c1788d-27d1-4615-9147-c4dc6fad86bb\") " pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.898727 5037 generic.go:334] "Generic (PLEG): container finished" podID="ce2d6221-7202-44cf-a85e-dec10e764129" containerID="f779eddf583ad6d58305391dcd83ea594be424fd15fcb3078262ca8f6dddd585" exitCode=0 Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.898862 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" event={"ID":"ce2d6221-7202-44cf-a85e-dec10e764129","Type":"ContainerDied","Data":"f779eddf583ad6d58305391dcd83ea594be424fd15fcb3078262ca8f6dddd585"} Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.908554 5037 generic.go:334] "Generic (PLEG): container finished" podID="290387b2-4285-4359-bfdc-f89128f0c0a2" containerID="a9deed0dd837df59f7ab69ece1f66419f5f59a7db6659e9220b571a9e144963b" exitCode=0 Nov 26 14:25:45 crc kubenswrapper[5037]: I1126 14:25:45.919531 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" event={"ID":"290387b2-4285-4359-bfdc-f89128f0c0a2","Type":"ContainerDied","Data":"a9deed0dd837df59f7ab69ece1f66419f5f59a7db6659e9220b571a9e144963b"} Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.037178 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.134627 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.186644 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.266204 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce2d6221-7202-44cf-a85e-dec10e764129-serving-cert\") pod \"ce2d6221-7202-44cf-a85e-dec10e764129\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.266353 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-proxy-ca-bundles\") pod \"ce2d6221-7202-44cf-a85e-dec10e764129\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.266391 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-client-ca\") pod \"ce2d6221-7202-44cf-a85e-dec10e764129\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.266493 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-config\") pod \"ce2d6221-7202-44cf-a85e-dec10e764129\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.266521 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/290387b2-4285-4359-bfdc-f89128f0c0a2-client-ca\") pod \"290387b2-4285-4359-bfdc-f89128f0c0a2\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.267437 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-client-ca" (OuterVolumeSpecName: "client-ca") pod "ce2d6221-7202-44cf-a85e-dec10e764129" (UID: "ce2d6221-7202-44cf-a85e-dec10e764129"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.268111 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "ce2d6221-7202-44cf-a85e-dec10e764129" (UID: "ce2d6221-7202-44cf-a85e-dec10e764129"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.266556 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwrf6\" (UniqueName: \"kubernetes.io/projected/290387b2-4285-4359-bfdc-f89128f0c0a2-kube-api-access-dwrf6\") pod \"290387b2-4285-4359-bfdc-f89128f0c0a2\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.268411 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/290387b2-4285-4359-bfdc-f89128f0c0a2-serving-cert\") pod \"290387b2-4285-4359-bfdc-f89128f0c0a2\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.268519 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-config" (OuterVolumeSpecName: "config") pod "ce2d6221-7202-44cf-a85e-dec10e764129" (UID: "ce2d6221-7202-44cf-a85e-dec10e764129"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.268824 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/290387b2-4285-4359-bfdc-f89128f0c0a2-client-ca" (OuterVolumeSpecName: "client-ca") pod "290387b2-4285-4359-bfdc-f89128f0c0a2" (UID: "290387b2-4285-4359-bfdc-f89128f0c0a2"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.268957 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pb69\" (UniqueName: \"kubernetes.io/projected/ce2d6221-7202-44cf-a85e-dec10e764129-kube-api-access-4pb69\") pod \"ce2d6221-7202-44cf-a85e-dec10e764129\" (UID: \"ce2d6221-7202-44cf-a85e-dec10e764129\") " Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.268994 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/290387b2-4285-4359-bfdc-f89128f0c0a2-config\") pod \"290387b2-4285-4359-bfdc-f89128f0c0a2\" (UID: \"290387b2-4285-4359-bfdc-f89128f0c0a2\") " Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.269425 5037 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.269440 5037 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.269495 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce2d6221-7202-44cf-a85e-dec10e764129-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.269505 5037 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/290387b2-4285-4359-bfdc-f89128f0c0a2-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.272314 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/290387b2-4285-4359-bfdc-f89128f0c0a2-kube-api-access-dwrf6" (OuterVolumeSpecName: "kube-api-access-dwrf6") pod "290387b2-4285-4359-bfdc-f89128f0c0a2" (UID: "290387b2-4285-4359-bfdc-f89128f0c0a2"). InnerVolumeSpecName "kube-api-access-dwrf6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.273044 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/290387b2-4285-4359-bfdc-f89128f0c0a2-config" (OuterVolumeSpecName: "config") pod "290387b2-4285-4359-bfdc-f89128f0c0a2" (UID: "290387b2-4285-4359-bfdc-f89128f0c0a2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.273635 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce2d6221-7202-44cf-a85e-dec10e764129-kube-api-access-4pb69" (OuterVolumeSpecName: "kube-api-access-4pb69") pod "ce2d6221-7202-44cf-a85e-dec10e764129" (UID: "ce2d6221-7202-44cf-a85e-dec10e764129"). InnerVolumeSpecName "kube-api-access-4pb69". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.273693 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce2d6221-7202-44cf-a85e-dec10e764129-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ce2d6221-7202-44cf-a85e-dec10e764129" (UID: "ce2d6221-7202-44cf-a85e-dec10e764129"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.274330 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/290387b2-4285-4359-bfdc-f89128f0c0a2-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "290387b2-4285-4359-bfdc-f89128f0c0a2" (UID: "290387b2-4285-4359-bfdc-f89128f0c0a2"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.351986 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xf9zc"] Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.370260 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/290387b2-4285-4359-bfdc-f89128f0c0a2-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.370312 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce2d6221-7202-44cf-a85e-dec10e764129-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.370328 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwrf6\" (UniqueName: \"kubernetes.io/projected/290387b2-4285-4359-bfdc-f89128f0c0a2-kube-api-access-dwrf6\") on node \"crc\" DevicePath \"\"" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.370342 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/290387b2-4285-4359-bfdc-f89128f0c0a2-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.370353 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pb69\" (UniqueName: \"kubernetes.io/projected/ce2d6221-7202-44cf-a85e-dec10e764129-kube-api-access-4pb69\") on node \"crc\" DevicePath \"\"" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.916757 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" event={"ID":"290387b2-4285-4359-bfdc-f89128f0c0a2","Type":"ContainerDied","Data":"574d901382b8a51824f3ad03a3128e00bef13fe83ff84d0f6b90222c37be891f"} Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.916795 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.917057 5037 scope.go:117] "RemoveContainer" containerID="a9deed0dd837df59f7ab69ece1f66419f5f59a7db6659e9220b571a9e144963b" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.918845 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" event={"ID":"04c1788d-27d1-4615-9147-c4dc6fad86bb","Type":"ContainerStarted","Data":"1f3a08c89e22fdfe6664e7bd61621d54bfe518ab1bae5305abd0984ffe267496"} Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.918891 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" event={"ID":"04c1788d-27d1-4615-9147-c4dc6fad86bb","Type":"ContainerStarted","Data":"3dd0d8f112ba64912f9ce274176fbd7ada1fed4035f160dc396d8957d50cdf58"} Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.920022 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.922895 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" event={"ID":"ce2d6221-7202-44cf-a85e-dec10e764129","Type":"ContainerDied","Data":"18a751b202ab17be8c52ff7cd6c17daf17d8ce0ddac15fcefd0f113baa56eedf"} Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.922967 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-sk94z" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.928857 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.938953 5037 scope.go:117] "RemoveContainer" containerID="f779eddf583ad6d58305391dcd83ea594be424fd15fcb3078262ca8f6dddd585" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.953651 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-xf9zc" podStartSLOduration=1.9536304310000001 podStartE2EDuration="1.953630431s" podCreationTimestamp="2025-11-26 14:25:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:25:46.9506602 +0000 UTC m=+613.747430394" watchObservedRunningTime="2025-11-26 14:25:46.953630431 +0000 UTC m=+613.750400615" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.969740 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-69bb556988-7rtlm"] Nov 26 14:25:46 crc kubenswrapper[5037]: E1126 14:25:46.969984 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="290387b2-4285-4359-bfdc-f89128f0c0a2" containerName="route-controller-manager" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.969998 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="290387b2-4285-4359-bfdc-f89128f0c0a2" containerName="route-controller-manager" Nov 26 14:25:46 crc kubenswrapper[5037]: E1126 14:25:46.970007 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce2d6221-7202-44cf-a85e-dec10e764129" containerName="controller-manager" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.970012 
5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce2d6221-7202-44cf-a85e-dec10e764129" containerName="controller-manager" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.970114 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce2d6221-7202-44cf-a85e-dec10e764129" containerName="controller-manager" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.970130 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="290387b2-4285-4359-bfdc-f89128f0c0a2" containerName="route-controller-manager" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.970586 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.972190 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.972948 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.973299 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.973955 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.974239 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.974926 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.975135 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp"] Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.975794 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.977043 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-client-ca\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.977093 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwckw\" (UniqueName: \"kubernetes.io/projected/917ea42e-70fd-4dd6-adae-da0fdd3f5846-kube-api-access-qwckw\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.977142 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-config\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.977170 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-proxy-ca-bundles\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.977205 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/917ea42e-70fd-4dd6-adae-da0fdd3f5846-serving-cert\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.978405 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.979124 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.979166 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.979385 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.979652 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.979906 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.984907 5037 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 14:25:46 crc kubenswrapper[5037]: I1126 14:25:46.996417 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27"] Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.002491 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-69bb556988-7rtlm"] Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.008888 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-glk27"] Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.012761 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sk94z"] Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.015218 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sk94z"] Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.024681 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp"] Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.077542 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-config\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.077600 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5423cfc4-24c6-485d-a87a-5cd9141d9f17-config\") pod \"route-controller-manager-67c667d7c4-hpswp\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") " pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.077639 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-proxy-ca-bundles\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.077673 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzxbk\" (UniqueName: \"kubernetes.io/projected/5423cfc4-24c6-485d-a87a-5cd9141d9f17-kube-api-access-jzxbk\") pod \"route-controller-manager-67c667d7c4-hpswp\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") " pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.077699 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/917ea42e-70fd-4dd6-adae-da0fdd3f5846-serving-cert\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.078078 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-client-ca\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.078167 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5423cfc4-24c6-485d-a87a-5cd9141d9f17-serving-cert\") pod \"route-controller-manager-67c667d7c4-hpswp\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") " pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.078212 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwckw\" (UniqueName: \"kubernetes.io/projected/917ea42e-70fd-4dd6-adae-da0fdd3f5846-kube-api-access-qwckw\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.078245 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5423cfc4-24c6-485d-a87a-5cd9141d9f17-client-ca\") pod \"route-controller-manager-67c667d7c4-hpswp\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") " pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.079074 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-client-ca\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.079093 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-proxy-ca-bundles\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.080228 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-config\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.091427 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/917ea42e-70fd-4dd6-adae-da0fdd3f5846-serving-cert\") pod \"controller-manager-69bb556988-7rtlm\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.094010 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwckw\" (UniqueName: \"kubernetes.io/projected/917ea42e-70fd-4dd6-adae-da0fdd3f5846-kube-api-access-qwckw\") pod \"controller-manager-69bb556988-7rtlm\" (UID: 
\"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") " pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.180006 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5423cfc4-24c6-485d-a87a-5cd9141d9f17-serving-cert\") pod \"route-controller-manager-67c667d7c4-hpswp\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") " pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.180074 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5423cfc4-24c6-485d-a87a-5cd9141d9f17-client-ca\") pod \"route-controller-manager-67c667d7c4-hpswp\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") " pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.180130 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5423cfc4-24c6-485d-a87a-5cd9141d9f17-config\") pod \"route-controller-manager-67c667d7c4-hpswp\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") " pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.180190 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzxbk\" (UniqueName: \"kubernetes.io/projected/5423cfc4-24c6-485d-a87a-5cd9141d9f17-kube-api-access-jzxbk\") pod \"route-controller-manager-67c667d7c4-hpswp\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") " pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.181365 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5423cfc4-24c6-485d-a87a-5cd9141d9f17-config\") pod \"route-controller-manager-67c667d7c4-hpswp\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") " pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.181477 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5423cfc4-24c6-485d-a87a-5cd9141d9f17-client-ca\") pod \"route-controller-manager-67c667d7c4-hpswp\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") " pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.184925 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5423cfc4-24c6-485d-a87a-5cd9141d9f17-serving-cert\") pod \"route-controller-manager-67c667d7c4-hpswp\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") " pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.197872 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzxbk\" (UniqueName: \"kubernetes.io/projected/5423cfc4-24c6-485d-a87a-5cd9141d9f17-kube-api-access-jzxbk\") pod \"route-controller-manager-67c667d7c4-hpswp\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") " 
pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.289630 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.317198 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.501997 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-69bb556988-7rtlm"] Nov 26 14:25:47 crc kubenswrapper[5037]: W1126 14:25:47.514040 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod917ea42e_70fd_4dd6_adae_da0fdd3f5846.slice/crio-e8c4b0309ab439484202e9a5d401f03e118ff1097ddcc789510da3c51d647b95 WatchSource:0}: Error finding container e8c4b0309ab439484202e9a5d401f03e118ff1097ddcc789510da3c51d647b95: Status 404 returned error can't find the container with id e8c4b0309ab439484202e9a5d401f03e118ff1097ddcc789510da3c51d647b95 Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.574522 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp"] Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.915001 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="290387b2-4285-4359-bfdc-f89128f0c0a2" path="/var/lib/kubelet/pods/290387b2-4285-4359-bfdc-f89128f0c0a2/volumes" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.917327 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce2d6221-7202-44cf-a85e-dec10e764129" path="/var/lib/kubelet/pods/ce2d6221-7202-44cf-a85e-dec10e764129/volumes" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.944107 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" event={"ID":"5423cfc4-24c6-485d-a87a-5cd9141d9f17","Type":"ContainerStarted","Data":"a42ef1d0e78d8ea5c980a9062c6d549c55d9ee1a0fb10f7233ab51ec3cd2ffcd"} Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.944163 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" event={"ID":"5423cfc4-24c6-485d-a87a-5cd9141d9f17","Type":"ContainerStarted","Data":"b01df39958b0f468f00b0d18bffa95bba842c3f47b008d46c64f2b29e02ace88"} Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.944838 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.946523 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" event={"ID":"917ea42e-70fd-4dd6-adae-da0fdd3f5846","Type":"ContainerStarted","Data":"5d96053b5281b58d548b184acf9ce081e9b06d055fa469343c4bffc8c3288846"} Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.946569 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" event={"ID":"917ea42e-70fd-4dd6-adae-da0fdd3f5846","Type":"ContainerStarted","Data":"e8c4b0309ab439484202e9a5d401f03e118ff1097ddcc789510da3c51d647b95"} Nov 
26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.946762 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.951484 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.957858 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.970970 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" podStartSLOduration=2.970952874 podStartE2EDuration="2.970952874s" podCreationTimestamp="2025-11-26 14:25:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:25:47.968778163 +0000 UTC m=+614.765548347" watchObservedRunningTime="2025-11-26 14:25:47.970952874 +0000 UTC m=+614.767723058" Nov 26 14:25:47 crc kubenswrapper[5037]: I1126 14:25:47.992974 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" podStartSLOduration=2.992951044 podStartE2EDuration="2.992951044s" podCreationTimestamp="2025-11-26 14:25:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:25:47.99024677 +0000 UTC m=+614.787016964" watchObservedRunningTime="2025-11-26 14:25:47.992951044 +0000 UTC m=+614.789721228" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.691071 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nsmvk"] Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.693368 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.697604 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.709483 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nsmvk"] Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.776092 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44q72\" (UniqueName: \"kubernetes.io/projected/5cb26ae3-4fcb-4bcb-8118-1471510b9589-kube-api-access-44q72\") pod \"redhat-operators-nsmvk\" (UID: \"5cb26ae3-4fcb-4bcb-8118-1471510b9589\") " pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.776327 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cb26ae3-4fcb-4bcb-8118-1471510b9589-catalog-content\") pod \"redhat-operators-nsmvk\" (UID: \"5cb26ae3-4fcb-4bcb-8118-1471510b9589\") " pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.776386 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cb26ae3-4fcb-4bcb-8118-1471510b9589-utilities\") pod \"redhat-operators-nsmvk\" (UID: \"5cb26ae3-4fcb-4bcb-8118-1471510b9589\") " pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.878521 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cb26ae3-4fcb-4bcb-8118-1471510b9589-catalog-content\") pod \"redhat-operators-nsmvk\" (UID: \"5cb26ae3-4fcb-4bcb-8118-1471510b9589\") " pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.878584 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cb26ae3-4fcb-4bcb-8118-1471510b9589-utilities\") pod \"redhat-operators-nsmvk\" (UID: \"5cb26ae3-4fcb-4bcb-8118-1471510b9589\") " pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.878645 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44q72\" (UniqueName: \"kubernetes.io/projected/5cb26ae3-4fcb-4bcb-8118-1471510b9589-kube-api-access-44q72\") pod \"redhat-operators-nsmvk\" (UID: \"5cb26ae3-4fcb-4bcb-8118-1471510b9589\") " pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.879158 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cb26ae3-4fcb-4bcb-8118-1471510b9589-catalog-content\") pod \"redhat-operators-nsmvk\" (UID: \"5cb26ae3-4fcb-4bcb-8118-1471510b9589\") " pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.879491 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cb26ae3-4fcb-4bcb-8118-1471510b9589-utilities\") pod \"redhat-operators-nsmvk\" (UID: \"5cb26ae3-4fcb-4bcb-8118-1471510b9589\") " 
pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.882923 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pfb76"] Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.884463 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.886880 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.897021 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pfb76"] Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.917701 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44q72\" (UniqueName: \"kubernetes.io/projected/5cb26ae3-4fcb-4bcb-8118-1471510b9589-kube-api-access-44q72\") pod \"redhat-operators-nsmvk\" (UID: \"5cb26ae3-4fcb-4bcb-8118-1471510b9589\") " pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.981128 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a666cf42-14be-48a6-825d-65f7888c45a5-utilities\") pod \"certified-operators-pfb76\" (UID: \"a666cf42-14be-48a6-825d-65f7888c45a5\") " pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.981403 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a666cf42-14be-48a6-825d-65f7888c45a5-catalog-content\") pod \"certified-operators-pfb76\" (UID: \"a666cf42-14be-48a6-825d-65f7888c45a5\") " pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:25:52 crc kubenswrapper[5037]: I1126 14:25:52.981573 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjlj5\" (UniqueName: \"kubernetes.io/projected/a666cf42-14be-48a6-825d-65f7888c45a5-kube-api-access-cjlj5\") pod \"certified-operators-pfb76\" (UID: \"a666cf42-14be-48a6-825d-65f7888c45a5\") " pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.035606 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.085761 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjlj5\" (UniqueName: \"kubernetes.io/projected/a666cf42-14be-48a6-825d-65f7888c45a5-kube-api-access-cjlj5\") pod \"certified-operators-pfb76\" (UID: \"a666cf42-14be-48a6-825d-65f7888c45a5\") " pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.085890 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a666cf42-14be-48a6-825d-65f7888c45a5-utilities\") pod \"certified-operators-pfb76\" (UID: \"a666cf42-14be-48a6-825d-65f7888c45a5\") " pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.086034 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a666cf42-14be-48a6-825d-65f7888c45a5-catalog-content\") pod \"certified-operators-pfb76\" (UID: \"a666cf42-14be-48a6-825d-65f7888c45a5\") " pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.086853 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a666cf42-14be-48a6-825d-65f7888c45a5-utilities\") pod \"certified-operators-pfb76\" (UID: \"a666cf42-14be-48a6-825d-65f7888c45a5\") " pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.086853 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a666cf42-14be-48a6-825d-65f7888c45a5-catalog-content\") pod \"certified-operators-pfb76\" (UID: \"a666cf42-14be-48a6-825d-65f7888c45a5\") " pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.115578 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjlj5\" (UniqueName: \"kubernetes.io/projected/a666cf42-14be-48a6-825d-65f7888c45a5-kube-api-access-cjlj5\") pod \"certified-operators-pfb76\" (UID: \"a666cf42-14be-48a6-825d-65f7888c45a5\") " pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.244304 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.478133 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nsmvk"] Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.659106 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pfb76"] Nov 26 14:25:53 crc kubenswrapper[5037]: W1126 14:25:53.665971 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda666cf42_14be_48a6_825d_65f7888c45a5.slice/crio-69b407d544bdd9436549d9139e679f59935e44eb2ba260999cf6bdb47315b7cd WatchSource:0}: Error finding container 69b407d544bdd9436549d9139e679f59935e44eb2ba260999cf6bdb47315b7cd: Status 404 returned error can't find the container with id 69b407d544bdd9436549d9139e679f59935e44eb2ba260999cf6bdb47315b7cd Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.991169 5037 generic.go:334] "Generic (PLEG): container finished" podID="a666cf42-14be-48a6-825d-65f7888c45a5" containerID="e33565215826889ff2c2181ed758cc93e518b6c7a24817da7374e4daf12e5266" exitCode=0 Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.991271 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfb76" event={"ID":"a666cf42-14be-48a6-825d-65f7888c45a5","Type":"ContainerDied","Data":"e33565215826889ff2c2181ed758cc93e518b6c7a24817da7374e4daf12e5266"} Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.991384 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfb76" event={"ID":"a666cf42-14be-48a6-825d-65f7888c45a5","Type":"ContainerStarted","Data":"69b407d544bdd9436549d9139e679f59935e44eb2ba260999cf6bdb47315b7cd"} Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.993534 5037 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.996720 5037 generic.go:334] "Generic (PLEG): container finished" podID="5cb26ae3-4fcb-4bcb-8118-1471510b9589" containerID="a4ede36d5f46d6592d0d0583da88c182e1d9f5c99627b8e7af5d4ae9ea211907" exitCode=0 Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.996775 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nsmvk" event={"ID":"5cb26ae3-4fcb-4bcb-8118-1471510b9589","Type":"ContainerDied","Data":"a4ede36d5f46d6592d0d0583da88c182e1d9f5c99627b8e7af5d4ae9ea211907"} Nov 26 14:25:53 crc kubenswrapper[5037]: I1126 14:25:53.996812 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nsmvk" event={"ID":"5cb26ae3-4fcb-4bcb-8118-1471510b9589","Type":"ContainerStarted","Data":"e46a4de58148fda5a426212da6b6489682894c3592ce283301e46f6c6e3418b9"} Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.006808 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nsmvk" event={"ID":"5cb26ae3-4fcb-4bcb-8118-1471510b9589","Type":"ContainerStarted","Data":"761b2c498529854e53a5a002292581a542614e1f0945b6547ee267cead3c0177"} Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.081863 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fvms6"] Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.082981 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.089912 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.115428 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fvms6"] Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.121301 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgtcw\" (UniqueName: \"kubernetes.io/projected/f80c86bc-3752-494b-baa4-07549c0c183c-kube-api-access-bgtcw\") pod \"community-operators-fvms6\" (UID: \"f80c86bc-3752-494b-baa4-07549c0c183c\") " pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.121360 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f80c86bc-3752-494b-baa4-07549c0c183c-catalog-content\") pod \"community-operators-fvms6\" (UID: \"f80c86bc-3752-494b-baa4-07549c0c183c\") " pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.121411 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f80c86bc-3752-494b-baa4-07549c0c183c-utilities\") pod \"community-operators-fvms6\" (UID: \"f80c86bc-3752-494b-baa4-07549c0c183c\") " pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.222434 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgtcw\" (UniqueName: \"kubernetes.io/projected/f80c86bc-3752-494b-baa4-07549c0c183c-kube-api-access-bgtcw\") pod \"community-operators-fvms6\" (UID: \"f80c86bc-3752-494b-baa4-07549c0c183c\") " pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.222507 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f80c86bc-3752-494b-baa4-07549c0c183c-catalog-content\") pod \"community-operators-fvms6\" (UID: \"f80c86bc-3752-494b-baa4-07549c0c183c\") " pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.222547 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f80c86bc-3752-494b-baa4-07549c0c183c-utilities\") pod \"community-operators-fvms6\" (UID: \"f80c86bc-3752-494b-baa4-07549c0c183c\") " pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.223192 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f80c86bc-3752-494b-baa4-07549c0c183c-utilities\") pod \"community-operators-fvms6\" (UID: \"f80c86bc-3752-494b-baa4-07549c0c183c\") " pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.223532 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f80c86bc-3752-494b-baa4-07549c0c183c-catalog-content\") pod \"community-operators-fvms6\" (UID: 
\"f80c86bc-3752-494b-baa4-07549c0c183c\") " pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.257639 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgtcw\" (UniqueName: \"kubernetes.io/projected/f80c86bc-3752-494b-baa4-07549c0c183c-kube-api-access-bgtcw\") pod \"community-operators-fvms6\" (UID: \"f80c86bc-3752-494b-baa4-07549c0c183c\") " pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.280980 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-24x6j"] Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.282237 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.285984 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.293192 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-24x6j"] Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.327493 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f458ec30-f647-43d3-9a84-c611ebeb918d-catalog-content\") pod \"redhat-marketplace-24x6j\" (UID: \"f458ec30-f647-43d3-9a84-c611ebeb918d\") " pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.327604 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wg8m4\" (UniqueName: \"kubernetes.io/projected/f458ec30-f647-43d3-9a84-c611ebeb918d-kube-api-access-wg8m4\") pod \"redhat-marketplace-24x6j\" (UID: \"f458ec30-f647-43d3-9a84-c611ebeb918d\") " pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.327680 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f458ec30-f647-43d3-9a84-c611ebeb918d-utilities\") pod \"redhat-marketplace-24x6j\" (UID: \"f458ec30-f647-43d3-9a84-c611ebeb918d\") " pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.429194 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f458ec30-f647-43d3-9a84-c611ebeb918d-utilities\") pod \"redhat-marketplace-24x6j\" (UID: \"f458ec30-f647-43d3-9a84-c611ebeb918d\") " pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.429368 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f458ec30-f647-43d3-9a84-c611ebeb918d-catalog-content\") pod \"redhat-marketplace-24x6j\" (UID: \"f458ec30-f647-43d3-9a84-c611ebeb918d\") " pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.429479 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wg8m4\" (UniqueName: \"kubernetes.io/projected/f458ec30-f647-43d3-9a84-c611ebeb918d-kube-api-access-wg8m4\") pod \"redhat-marketplace-24x6j\" (UID: 
\"f458ec30-f647-43d3-9a84-c611ebeb918d\") " pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.429891 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f458ec30-f647-43d3-9a84-c611ebeb918d-utilities\") pod \"redhat-marketplace-24x6j\" (UID: \"f458ec30-f647-43d3-9a84-c611ebeb918d\") " pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.430024 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f458ec30-f647-43d3-9a84-c611ebeb918d-catalog-content\") pod \"redhat-marketplace-24x6j\" (UID: \"f458ec30-f647-43d3-9a84-c611ebeb918d\") " pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.448867 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wg8m4\" (UniqueName: \"kubernetes.io/projected/f458ec30-f647-43d3-9a84-c611ebeb918d-kube-api-access-wg8m4\") pod \"redhat-marketplace-24x6j\" (UID: \"f458ec30-f647-43d3-9a84-c611ebeb918d\") " pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.465666 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.644472 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:25:55 crc kubenswrapper[5037]: I1126 14:25:55.896696 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fvms6"] Nov 26 14:25:55 crc kubenswrapper[5037]: W1126 14:25:55.902871 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf80c86bc_3752_494b_baa4_07549c0c183c.slice/crio-a0ab9679c76ee24defd57ce92d09b09455da201b534d71c26875db1af37c4388 WatchSource:0}: Error finding container a0ab9679c76ee24defd57ce92d09b09455da201b534d71c26875db1af37c4388: Status 404 returned error can't find the container with id a0ab9679c76ee24defd57ce92d09b09455da201b534d71c26875db1af37c4388 Nov 26 14:25:56 crc kubenswrapper[5037]: I1126 14:25:56.015331 5037 generic.go:334] "Generic (PLEG): container finished" podID="a666cf42-14be-48a6-825d-65f7888c45a5" containerID="55e0803fe9e66245c36e5bdc9fe525dba7c395475a45a4c9b3bae96c9c6b4e75" exitCode=0 Nov 26 14:25:56 crc kubenswrapper[5037]: I1126 14:25:56.015472 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfb76" event={"ID":"a666cf42-14be-48a6-825d-65f7888c45a5","Type":"ContainerDied","Data":"55e0803fe9e66245c36e5bdc9fe525dba7c395475a45a4c9b3bae96c9c6b4e75"} Nov 26 14:25:56 crc kubenswrapper[5037]: I1126 14:25:56.017267 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fvms6" event={"ID":"f80c86bc-3752-494b-baa4-07549c0c183c","Type":"ContainerStarted","Data":"a0ab9679c76ee24defd57ce92d09b09455da201b534d71c26875db1af37c4388"} Nov 26 14:25:56 crc kubenswrapper[5037]: I1126 14:25:56.020673 5037 generic.go:334] "Generic (PLEG): container finished" podID="5cb26ae3-4fcb-4bcb-8118-1471510b9589" containerID="761b2c498529854e53a5a002292581a542614e1f0945b6547ee267cead3c0177" exitCode=0 Nov 26 14:25:56 crc kubenswrapper[5037]: 
I1126 14:25:56.020727 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nsmvk" event={"ID":"5cb26ae3-4fcb-4bcb-8118-1471510b9589","Type":"ContainerDied","Data":"761b2c498529854e53a5a002292581a542614e1f0945b6547ee267cead3c0177"} Nov 26 14:25:56 crc kubenswrapper[5037]: I1126 14:25:56.056164 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-24x6j"] Nov 26 14:25:56 crc kubenswrapper[5037]: W1126 14:25:56.098501 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf458ec30_f647_43d3_9a84_c611ebeb918d.slice/crio-1818d4f390bae8cf071330b9febbcc299375e853e7eda17ea569abc542adaa6e WatchSource:0}: Error finding container 1818d4f390bae8cf071330b9febbcc299375e853e7eda17ea569abc542adaa6e: Status 404 returned error can't find the container with id 1818d4f390bae8cf071330b9febbcc299375e853e7eda17ea569abc542adaa6e Nov 26 14:25:57 crc kubenswrapper[5037]: I1126 14:25:57.028813 5037 generic.go:334] "Generic (PLEG): container finished" podID="f458ec30-f647-43d3-9a84-c611ebeb918d" containerID="5a5caa0a0ebbbcdd2cc07167245fb683a123ab2b66efa82372f7ecc84783d2ce" exitCode=0 Nov 26 14:25:57 crc kubenswrapper[5037]: I1126 14:25:57.028906 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24x6j" event={"ID":"f458ec30-f647-43d3-9a84-c611ebeb918d","Type":"ContainerDied","Data":"5a5caa0a0ebbbcdd2cc07167245fb683a123ab2b66efa82372f7ecc84783d2ce"} Nov 26 14:25:57 crc kubenswrapper[5037]: I1126 14:25:57.029493 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24x6j" event={"ID":"f458ec30-f647-43d3-9a84-c611ebeb918d","Type":"ContainerStarted","Data":"1818d4f390bae8cf071330b9febbcc299375e853e7eda17ea569abc542adaa6e"} Nov 26 14:25:57 crc kubenswrapper[5037]: I1126 14:25:57.033690 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfb76" event={"ID":"a666cf42-14be-48a6-825d-65f7888c45a5","Type":"ContainerStarted","Data":"a75c4fb3856dbec91d253de1eae25a260f371952bac3015c5884f6d4f35eda6a"} Nov 26 14:25:57 crc kubenswrapper[5037]: I1126 14:25:57.036141 5037 generic.go:334] "Generic (PLEG): container finished" podID="f80c86bc-3752-494b-baa4-07549c0c183c" containerID="cb842309812bd09d8c7783b91c41ce0c0aca29379de172505334f9078f2e28e7" exitCode=0 Nov 26 14:25:57 crc kubenswrapper[5037]: I1126 14:25:57.036251 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fvms6" event={"ID":"f80c86bc-3752-494b-baa4-07549c0c183c","Type":"ContainerDied","Data":"cb842309812bd09d8c7783b91c41ce0c0aca29379de172505334f9078f2e28e7"} Nov 26 14:25:57 crc kubenswrapper[5037]: I1126 14:25:57.041680 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nsmvk" event={"ID":"5cb26ae3-4fcb-4bcb-8118-1471510b9589","Type":"ContainerStarted","Data":"c15a58b5d2b1867ed1df328147178abb67b09d2bb67adf1f02e71f438cd9cb27"} Nov 26 14:25:57 crc kubenswrapper[5037]: I1126 14:25:57.100759 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nsmvk" podStartSLOduration=2.532095857 podStartE2EDuration="5.100731476s" podCreationTimestamp="2025-11-26 14:25:52 +0000 UTC" firstStartedPulling="2025-11-26 14:25:53.999062385 +0000 UTC m=+620.795832569" lastFinishedPulling="2025-11-26 14:25:56.567698004 +0000 
UTC m=+623.364468188" observedRunningTime="2025-11-26 14:25:57.094208252 +0000 UTC m=+623.890978446" watchObservedRunningTime="2025-11-26 14:25:57.100731476 +0000 UTC m=+623.897501660" Nov 26 14:25:57 crc kubenswrapper[5037]: I1126 14:25:57.119981 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pfb76" podStartSLOduration=2.363652525 podStartE2EDuration="5.119953771s" podCreationTimestamp="2025-11-26 14:25:52 +0000 UTC" firstStartedPulling="2025-11-26 14:25:53.993341109 +0000 UTC m=+620.790111283" lastFinishedPulling="2025-11-26 14:25:56.749642315 +0000 UTC m=+623.546412529" observedRunningTime="2025-11-26 14:25:57.114794079 +0000 UTC m=+623.911564273" watchObservedRunningTime="2025-11-26 14:25:57.119953771 +0000 UTC m=+623.916723955" Nov 26 14:25:58 crc kubenswrapper[5037]: I1126 14:25:58.056796 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24x6j" event={"ID":"f458ec30-f647-43d3-9a84-c611ebeb918d","Type":"ContainerStarted","Data":"0e4844c7633fb030bf7d87bdcefd82db3726c7265cbdd2a63e32a46e9497db2e"} Nov 26 14:25:59 crc kubenswrapper[5037]: I1126 14:25:59.068105 5037 generic.go:334] "Generic (PLEG): container finished" podID="f458ec30-f647-43d3-9a84-c611ebeb918d" containerID="0e4844c7633fb030bf7d87bdcefd82db3726c7265cbdd2a63e32a46e9497db2e" exitCode=0 Nov 26 14:25:59 crc kubenswrapper[5037]: I1126 14:25:59.068173 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24x6j" event={"ID":"f458ec30-f647-43d3-9a84-c611ebeb918d","Type":"ContainerDied","Data":"0e4844c7633fb030bf7d87bdcefd82db3726c7265cbdd2a63e32a46e9497db2e"} Nov 26 14:25:59 crc kubenswrapper[5037]: I1126 14:25:59.071601 5037 generic.go:334] "Generic (PLEG): container finished" podID="f80c86bc-3752-494b-baa4-07549c0c183c" containerID="3a21ef9438d9ddfb8b34f259c1d3002f316cfb26c8d7b9bbac4c363c310d9387" exitCode=0 Nov 26 14:25:59 crc kubenswrapper[5037]: I1126 14:25:59.071650 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fvms6" event={"ID":"f80c86bc-3752-494b-baa4-07549c0c183c","Type":"ContainerDied","Data":"3a21ef9438d9ddfb8b34f259c1d3002f316cfb26c8d7b9bbac4c363c310d9387"} Nov 26 14:26:00 crc kubenswrapper[5037]: I1126 14:26:00.080874 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24x6j" event={"ID":"f458ec30-f647-43d3-9a84-c611ebeb918d","Type":"ContainerStarted","Data":"0c9ae5b6e3bc995557f235b3f8990a8a1de728adf4bd783c5e79d8ebb22c7698"} Nov 26 14:26:00 crc kubenswrapper[5037]: I1126 14:26:00.084064 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fvms6" event={"ID":"f80c86bc-3752-494b-baa4-07549c0c183c","Type":"ContainerStarted","Data":"da18f09819ed97e63ae4b76a6a0a364dd921b07937d6228515929a3c19735639"} Nov 26 14:26:00 crc kubenswrapper[5037]: I1126 14:26:00.099981 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-24x6j" podStartSLOduration=2.561564311 podStartE2EDuration="5.099958786s" podCreationTimestamp="2025-11-26 14:25:55 +0000 UTC" firstStartedPulling="2025-11-26 14:25:57.030562477 +0000 UTC m=+623.827332661" lastFinishedPulling="2025-11-26 14:25:59.568956942 +0000 UTC m=+626.365727136" observedRunningTime="2025-11-26 14:26:00.098428329 +0000 UTC m=+626.895198533" watchObservedRunningTime="2025-11-26 14:26:00.099958786 +0000 UTC 
m=+626.896728970" Nov 26 14:26:00 crc kubenswrapper[5037]: I1126 14:26:00.117204 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fvms6" podStartSLOduration=2.564797907 podStartE2EDuration="5.117189233s" podCreationTimestamp="2025-11-26 14:25:55 +0000 UTC" firstStartedPulling="2025-11-26 14:25:57.037723186 +0000 UTC m=+623.834493370" lastFinishedPulling="2025-11-26 14:25:59.590114502 +0000 UTC m=+626.386884696" observedRunningTime="2025-11-26 14:26:00.116271272 +0000 UTC m=+626.913041466" watchObservedRunningTime="2025-11-26 14:26:00.117189233 +0000 UTC m=+626.913959417" Nov 26 14:26:03 crc kubenswrapper[5037]: I1126 14:26:03.036774 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:26:03 crc kubenswrapper[5037]: I1126 14:26:03.037334 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:26:03 crc kubenswrapper[5037]: I1126 14:26:03.093958 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:26:03 crc kubenswrapper[5037]: I1126 14:26:03.145265 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nsmvk" Nov 26 14:26:03 crc kubenswrapper[5037]: I1126 14:26:03.245330 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:26:03 crc kubenswrapper[5037]: I1126 14:26:03.245403 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:26:03 crc kubenswrapper[5037]: I1126 14:26:03.285572 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:26:04 crc kubenswrapper[5037]: I1126 14:26:04.157492 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:26:05 crc kubenswrapper[5037]: I1126 14:26:05.466107 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:26:05 crc kubenswrapper[5037]: I1126 14:26:05.466179 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:26:05 crc kubenswrapper[5037]: I1126 14:26:05.516117 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:26:05 crc kubenswrapper[5037]: I1126 14:26:05.644996 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:26:05 crc kubenswrapper[5037]: I1126 14:26:05.645081 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:26:05 crc kubenswrapper[5037]: I1126 14:26:05.699068 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-24x6j" Nov 26 14:26:06 crc kubenswrapper[5037]: I1126 14:26:06.195971 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fvms6" Nov 26 14:26:06 crc kubenswrapper[5037]: I1126 14:26:06.196391 5037 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-24x6j"
Nov 26 14:26:19 crc kubenswrapper[5037]: I1126 14:26:19.754550 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp"]
Nov 26 14:26:19 crc kubenswrapper[5037]: I1126 14:26:19.755690 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" podUID="5423cfc4-24c6-485d-a87a-5cd9141d9f17" containerName="route-controller-manager" containerID="cri-o://a42ef1d0e78d8ea5c980a9062c6d549c55d9ee1a0fb10f7233ab51ec3cd2ffcd" gracePeriod=30
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.148007 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.239588 5037 generic.go:334] "Generic (PLEG): container finished" podID="5423cfc4-24c6-485d-a87a-5cd9141d9f17" containerID="a42ef1d0e78d8ea5c980a9062c6d549c55d9ee1a0fb10f7233ab51ec3cd2ffcd" exitCode=0
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.239668 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.239661 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" event={"ID":"5423cfc4-24c6-485d-a87a-5cd9141d9f17","Type":"ContainerDied","Data":"a42ef1d0e78d8ea5c980a9062c6d549c55d9ee1a0fb10f7233ab51ec3cd2ffcd"}
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.239730 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp" event={"ID":"5423cfc4-24c6-485d-a87a-5cd9141d9f17","Type":"ContainerDied","Data":"b01df39958b0f468f00b0d18bffa95bba842c3f47b008d46c64f2b29e02ace88"}
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.239756 5037 scope.go:117] "RemoveContainer" containerID="a42ef1d0e78d8ea5c980a9062c6d549c55d9ee1a0fb10f7233ab51ec3cd2ffcd"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.255193 5037 scope.go:117] "RemoveContainer" containerID="a42ef1d0e78d8ea5c980a9062c6d549c55d9ee1a0fb10f7233ab51ec3cd2ffcd"
Nov 26 14:26:20 crc kubenswrapper[5037]: E1126 14:26:20.255772 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a42ef1d0e78d8ea5c980a9062c6d549c55d9ee1a0fb10f7233ab51ec3cd2ffcd\": container with ID starting with a42ef1d0e78d8ea5c980a9062c6d549c55d9ee1a0fb10f7233ab51ec3cd2ffcd not found: ID does not exist" containerID="a42ef1d0e78d8ea5c980a9062c6d549c55d9ee1a0fb10f7233ab51ec3cd2ffcd"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.255850 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a42ef1d0e78d8ea5c980a9062c6d549c55d9ee1a0fb10f7233ab51ec3cd2ffcd"} err="failed to get container status \"a42ef1d0e78d8ea5c980a9062c6d549c55d9ee1a0fb10f7233ab51ec3cd2ffcd\": rpc error: code = NotFound desc = could not find container \"a42ef1d0e78d8ea5c980a9062c6d549c55d9ee1a0fb10f7233ab51ec3cd2ffcd\": container with ID starting with a42ef1d0e78d8ea5c980a9062c6d549c55d9ee1a0fb10f7233ab51ec3cd2ffcd not found: ID does not exist"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.302578 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5423cfc4-24c6-485d-a87a-5cd9141d9f17-client-ca\") pod \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") "
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.302651 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzxbk\" (UniqueName: \"kubernetes.io/projected/5423cfc4-24c6-485d-a87a-5cd9141d9f17-kube-api-access-jzxbk\") pod \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") "
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.302716 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5423cfc4-24c6-485d-a87a-5cd9141d9f17-serving-cert\") pod \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") "
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.302769 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5423cfc4-24c6-485d-a87a-5cd9141d9f17-config\") pod \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\" (UID: \"5423cfc4-24c6-485d-a87a-5cd9141d9f17\") "
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.303566 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5423cfc4-24c6-485d-a87a-5cd9141d9f17-client-ca" (OuterVolumeSpecName: "client-ca") pod "5423cfc4-24c6-485d-a87a-5cd9141d9f17" (UID: "5423cfc4-24c6-485d-a87a-5cd9141d9f17"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.303672 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5423cfc4-24c6-485d-a87a-5cd9141d9f17-config" (OuterVolumeSpecName: "config") pod "5423cfc4-24c6-485d-a87a-5cd9141d9f17" (UID: "5423cfc4-24c6-485d-a87a-5cd9141d9f17"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.308434 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5423cfc4-24c6-485d-a87a-5cd9141d9f17-kube-api-access-jzxbk" (OuterVolumeSpecName: "kube-api-access-jzxbk") pod "5423cfc4-24c6-485d-a87a-5cd9141d9f17" (UID: "5423cfc4-24c6-485d-a87a-5cd9141d9f17"). InnerVolumeSpecName "kube-api-access-jzxbk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.308990 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5423cfc4-24c6-485d-a87a-5cd9141d9f17-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5423cfc4-24c6-485d-a87a-5cd9141d9f17" (UID: "5423cfc4-24c6-485d-a87a-5cd9141d9f17"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.404335 5037 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5423cfc4-24c6-485d-a87a-5cd9141d9f17-client-ca\") on node \"crc\" DevicePath \"\""
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.404392 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzxbk\" (UniqueName: \"kubernetes.io/projected/5423cfc4-24c6-485d-a87a-5cd9141d9f17-kube-api-access-jzxbk\") on node \"crc\" DevicePath \"\""
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.404415 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5423cfc4-24c6-485d-a87a-5cd9141d9f17-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.404437 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5423cfc4-24c6-485d-a87a-5cd9141d9f17-config\") on node \"crc\" DevicePath \"\""
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.575476 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp"]
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.579244 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67c667d7c4-hpswp"]
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.988909 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"]
Nov 26 14:26:20 crc kubenswrapper[5037]: E1126 14:26:20.989986 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5423cfc4-24c6-485d-a87a-5cd9141d9f17" containerName="route-controller-manager"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.990252 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="5423cfc4-24c6-485d-a87a-5cd9141d9f17" containerName="route-controller-manager"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.990448 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="5423cfc4-24c6-485d-a87a-5cd9141d9f17" containerName="route-controller-manager"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.990853 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.993950 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.994622 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.997145 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.997176 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.998406 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 26 14:26:20 crc kubenswrapper[5037]: I1126 14:26:20.998841 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.006790 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"]
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.112116 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42b724de-dc39-42f5-8fac-91c61a56b563-config\") pod \"route-controller-manager-548787bc4f-m64rq\" (UID: \"42b724de-dc39-42f5-8fac-91c61a56b563\") " pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.112254 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/42b724de-dc39-42f5-8fac-91c61a56b563-client-ca\") pod \"route-controller-manager-548787bc4f-m64rq\" (UID: \"42b724de-dc39-42f5-8fac-91c61a56b563\") " pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.112385 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42b724de-dc39-42f5-8fac-91c61a56b563-serving-cert\") pod \"route-controller-manager-548787bc4f-m64rq\" (UID: \"42b724de-dc39-42f5-8fac-91c61a56b563\") " pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.112528 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cwlq\" (UniqueName: \"kubernetes.io/projected/42b724de-dc39-42f5-8fac-91c61a56b563-kube-api-access-2cwlq\") pod \"route-controller-manager-548787bc4f-m64rq\" (UID: \"42b724de-dc39-42f5-8fac-91c61a56b563\") " pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.214059 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cwlq\" (UniqueName: \"kubernetes.io/projected/42b724de-dc39-42f5-8fac-91c61a56b563-kube-api-access-2cwlq\") pod \"route-controller-manager-548787bc4f-m64rq\" (UID: \"42b724de-dc39-42f5-8fac-91c61a56b563\") " pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.214125 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42b724de-dc39-42f5-8fac-91c61a56b563-config\") pod \"route-controller-manager-548787bc4f-m64rq\" (UID: \"42b724de-dc39-42f5-8fac-91c61a56b563\") " pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.214170 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/42b724de-dc39-42f5-8fac-91c61a56b563-client-ca\") pod \"route-controller-manager-548787bc4f-m64rq\" (UID: \"42b724de-dc39-42f5-8fac-91c61a56b563\") " pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.214187 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42b724de-dc39-42f5-8fac-91c61a56b563-serving-cert\") pod \"route-controller-manager-548787bc4f-m64rq\" (UID: \"42b724de-dc39-42f5-8fac-91c61a56b563\") " pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.215860 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/42b724de-dc39-42f5-8fac-91c61a56b563-client-ca\") pod \"route-controller-manager-548787bc4f-m64rq\" (UID: \"42b724de-dc39-42f5-8fac-91c61a56b563\") " pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.215946 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42b724de-dc39-42f5-8fac-91c61a56b563-config\") pod \"route-controller-manager-548787bc4f-m64rq\" (UID: \"42b724de-dc39-42f5-8fac-91c61a56b563\") " pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.221488 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42b724de-dc39-42f5-8fac-91c61a56b563-serving-cert\") pod \"route-controller-manager-548787bc4f-m64rq\" (UID: \"42b724de-dc39-42f5-8fac-91c61a56b563\") " pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.247861 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cwlq\" (UniqueName: \"kubernetes.io/projected/42b724de-dc39-42f5-8fac-91c61a56b563-kube-api-access-2cwlq\") pod \"route-controller-manager-548787bc4f-m64rq\" (UID: \"42b724de-dc39-42f5-8fac-91c61a56b563\") " pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.310051 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.829014 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"]
Nov 26 14:26:21 crc kubenswrapper[5037]: I1126 14:26:21.917154 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5423cfc4-24c6-485d-a87a-5cd9141d9f17" path="/var/lib/kubelet/pods/5423cfc4-24c6-485d-a87a-5cd9141d9f17/volumes"
Nov 26 14:26:22 crc kubenswrapper[5037]: I1126 14:26:22.254579 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq" event={"ID":"42b724de-dc39-42f5-8fac-91c61a56b563","Type":"ContainerStarted","Data":"a057e7c40fafa48b02e5d68cfd5658179d3df4c484a10e4e2123df8b6656a3d2"}
Nov 26 14:26:22 crc kubenswrapper[5037]: I1126 14:26:22.254624 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq" event={"ID":"42b724de-dc39-42f5-8fac-91c61a56b563","Type":"ContainerStarted","Data":"07f4f79a7523d4c238d1501a5cd1c20f327d42e9534e28d0abe3c1edaf6aa45d"}
Nov 26 14:26:22 crc kubenswrapper[5037]: I1126 14:26:22.254881 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:22 crc kubenswrapper[5037]: I1126 14:26:22.272250 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq" podStartSLOduration=3.272231556 podStartE2EDuration="3.272231556s" podCreationTimestamp="2025-11-26 14:26:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:26:22.268954558 +0000 UTC m=+649.065724762" watchObservedRunningTime="2025-11-26 14:26:22.272231556 +0000 UTC m=+649.069001750"
Nov 26 14:26:22 crc kubenswrapper[5037]: I1126 14:26:22.349396 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-548787bc4f-m64rq"
Nov 26 14:26:39 crc kubenswrapper[5037]: I1126 14:26:39.782137 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-69bb556988-7rtlm"]
Nov 26 14:26:39 crc kubenswrapper[5037]: I1126 14:26:39.783594 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" podUID="917ea42e-70fd-4dd6-adae-da0fdd3f5846" containerName="controller-manager" containerID="cri-o://5d96053b5281b58d548b184acf9ce081e9b06d055fa469343c4bffc8c3288846" gracePeriod=30
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.149042 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm"
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.266388 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-config\") pod \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") "
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.266554 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-proxy-ca-bundles\") pod \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") "
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.266591 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qwckw\" (UniqueName: \"kubernetes.io/projected/917ea42e-70fd-4dd6-adae-da0fdd3f5846-kube-api-access-qwckw\") pod \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") "
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.266618 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-client-ca\") pod \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") "
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.266664 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/917ea42e-70fd-4dd6-adae-da0fdd3f5846-serving-cert\") pod \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\" (UID: \"917ea42e-70fd-4dd6-adae-da0fdd3f5846\") "
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.267252 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "917ea42e-70fd-4dd6-adae-da0fdd3f5846" (UID: "917ea42e-70fd-4dd6-adae-da0fdd3f5846"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.267385 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-client-ca" (OuterVolumeSpecName: "client-ca") pod "917ea42e-70fd-4dd6-adae-da0fdd3f5846" (UID: "917ea42e-70fd-4dd6-adae-da0fdd3f5846"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.267840 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-config" (OuterVolumeSpecName: "config") pod "917ea42e-70fd-4dd6-adae-da0fdd3f5846" (UID: "917ea42e-70fd-4dd6-adae-da0fdd3f5846"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.272454 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/917ea42e-70fd-4dd6-adae-da0fdd3f5846-kube-api-access-qwckw" (OuterVolumeSpecName: "kube-api-access-qwckw") pod "917ea42e-70fd-4dd6-adae-da0fdd3f5846" (UID: "917ea42e-70fd-4dd6-adae-da0fdd3f5846"). InnerVolumeSpecName "kube-api-access-qwckw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.272461 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/917ea42e-70fd-4dd6-adae-da0fdd3f5846-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "917ea42e-70fd-4dd6-adae-da0fdd3f5846" (UID: "917ea42e-70fd-4dd6-adae-da0fdd3f5846"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.364063 5037 generic.go:334] "Generic (PLEG): container finished" podID="917ea42e-70fd-4dd6-adae-da0fdd3f5846" containerID="5d96053b5281b58d548b184acf9ce081e9b06d055fa469343c4bffc8c3288846" exitCode=0
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.364104 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" event={"ID":"917ea42e-70fd-4dd6-adae-da0fdd3f5846","Type":"ContainerDied","Data":"5d96053b5281b58d548b184acf9ce081e9b06d055fa469343c4bffc8c3288846"}
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.364138 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm" event={"ID":"917ea42e-70fd-4dd6-adae-da0fdd3f5846","Type":"ContainerDied","Data":"e8c4b0309ab439484202e9a5d401f03e118ff1097ddcc789510da3c51d647b95"}
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.364157 5037 scope.go:117] "RemoveContainer" containerID="5d96053b5281b58d548b184acf9ce081e9b06d055fa469343c4bffc8c3288846"
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.364158 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-69bb556988-7rtlm"
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.368081 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-config\") on node \"crc\" DevicePath \"\""
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.368113 5037 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.368127 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qwckw\" (UniqueName: \"kubernetes.io/projected/917ea42e-70fd-4dd6-adae-da0fdd3f5846-kube-api-access-qwckw\") on node \"crc\" DevicePath \"\""
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.368139 5037 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/917ea42e-70fd-4dd6-adae-da0fdd3f5846-client-ca\") on node \"crc\" DevicePath \"\""
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.368156 5037 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/917ea42e-70fd-4dd6-adae-da0fdd3f5846-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.385412 5037 scope.go:117] "RemoveContainer" containerID="5d96053b5281b58d548b184acf9ce081e9b06d055fa469343c4bffc8c3288846"
Nov 26 14:26:40 crc kubenswrapper[5037]: E1126 14:26:40.385976 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d96053b5281b58d548b184acf9ce081e9b06d055fa469343c4bffc8c3288846\": container with ID starting with 5d96053b5281b58d548b184acf9ce081e9b06d055fa469343c4bffc8c3288846 not found: ID does not exist" containerID="5d96053b5281b58d548b184acf9ce081e9b06d055fa469343c4bffc8c3288846"
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.386008 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d96053b5281b58d548b184acf9ce081e9b06d055fa469343c4bffc8c3288846"} err="failed to get container status \"5d96053b5281b58d548b184acf9ce081e9b06d055fa469343c4bffc8c3288846\": rpc error: code = NotFound desc = could not find container \"5d96053b5281b58d548b184acf9ce081e9b06d055fa469343c4bffc8c3288846\": container with ID starting with 5d96053b5281b58d548b184acf9ce081e9b06d055fa469343c4bffc8c3288846 not found: ID does not exist"
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.398538 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-69bb556988-7rtlm"]
Nov 26 14:26:40 crc kubenswrapper[5037]: I1126 14:26:40.402647 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-69bb556988-7rtlm"]
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.007226 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"]
Nov 26 14:26:41 crc kubenswrapper[5037]: E1126 14:26:41.007689 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="917ea42e-70fd-4dd6-adae-da0fdd3f5846" containerName="controller-manager"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.007716 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="917ea42e-70fd-4dd6-adae-da0fdd3f5846" containerName="controller-manager"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.007957 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="917ea42e-70fd-4dd6-adae-da0fdd3f5846" containerName="controller-manager"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.008805 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.011342 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.012730 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.013523 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.013570 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.013606 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.014123 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.019825 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.021797 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"]
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.078688 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb8g6\" (UniqueName: \"kubernetes.io/projected/127d342c-2e8c-47b0-a34d-3ed95567ce0d-kube-api-access-nb8g6\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.078868 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/127d342c-2e8c-47b0-a34d-3ed95567ce0d-proxy-ca-bundles\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.079014 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/127d342c-2e8c-47b0-a34d-3ed95567ce0d-client-ca\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.079090 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/127d342c-2e8c-47b0-a34d-3ed95567ce0d-serving-cert\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.079229 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/127d342c-2e8c-47b0-a34d-3ed95567ce0d-config\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.179988 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/127d342c-2e8c-47b0-a34d-3ed95567ce0d-client-ca\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.180148 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/127d342c-2e8c-47b0-a34d-3ed95567ce0d-serving-cert\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.180188 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/127d342c-2e8c-47b0-a34d-3ed95567ce0d-config\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.180230 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb8g6\" (UniqueName: \"kubernetes.io/projected/127d342c-2e8c-47b0-a34d-3ed95567ce0d-kube-api-access-nb8g6\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.180279 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/127d342c-2e8c-47b0-a34d-3ed95567ce0d-proxy-ca-bundles\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.181200 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/127d342c-2e8c-47b0-a34d-3ed95567ce0d-client-ca\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.181691 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/127d342c-2e8c-47b0-a34d-3ed95567ce0d-config\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.181763 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/127d342c-2e8c-47b0-a34d-3ed95567ce0d-proxy-ca-bundles\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.187230 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/127d342c-2e8c-47b0-a34d-3ed95567ce0d-serving-cert\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.206329 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nb8g6\" (UniqueName: \"kubernetes.io/projected/127d342c-2e8c-47b0-a34d-3ed95567ce0d-kube-api-access-nb8g6\") pod \"controller-manager-68c8c7bb47-d9bv8\" (UID: \"127d342c-2e8c-47b0-a34d-3ed95567ce0d\") " pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.337179 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.563980 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"]
Nov 26 14:26:41 crc kubenswrapper[5037]: W1126 14:26:41.578668 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod127d342c_2e8c_47b0_a34d_3ed95567ce0d.slice/crio-88eb8a79f25ae5f46061f031769b2f6dc25958da3995e5bc3658f236ab29e35a WatchSource:0}: Error finding container 88eb8a79f25ae5f46061f031769b2f6dc25958da3995e5bc3658f236ab29e35a: Status 404 returned error can't find the container with id 88eb8a79f25ae5f46061f031769b2f6dc25958da3995e5bc3658f236ab29e35a
Nov 26 14:26:41 crc kubenswrapper[5037]: I1126 14:26:41.917201 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="917ea42e-70fd-4dd6-adae-da0fdd3f5846" path="/var/lib/kubelet/pods/917ea42e-70fd-4dd6-adae-da0fdd3f5846/volumes"
Nov 26 14:26:42 crc kubenswrapper[5037]: I1126 14:26:42.379668 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8" event={"ID":"127d342c-2e8c-47b0-a34d-3ed95567ce0d","Type":"ContainerStarted","Data":"8d0051fda1badc51c952c0618386f7e085c2ba136cb1552e9db7fee716e308bf"}
Nov 26 14:26:42 crc kubenswrapper[5037]: I1126 14:26:42.380003 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:42 crc kubenswrapper[5037]: I1126 14:26:42.380018 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8" event={"ID":"127d342c-2e8c-47b0-a34d-3ed95567ce0d","Type":"ContainerStarted","Data":"88eb8a79f25ae5f46061f031769b2f6dc25958da3995e5bc3658f236ab29e35a"}
Nov 26 14:26:42 crc kubenswrapper[5037]: I1126 14:26:42.388344 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8"
Nov 26 14:26:42 crc kubenswrapper[5037]: I1126 14:26:42.396440 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-68c8c7bb47-d9bv8" podStartSLOduration=3.396425236 podStartE2EDuration="3.396425236s" podCreationTimestamp="2025-11-26 14:26:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:26:42.394627782 +0000 UTC m=+669.191397966" watchObservedRunningTime="2025-11-26 14:26:42.396425236 +0000 UTC m=+669.193195420"
Nov 26 14:27:11 crc kubenswrapper[5037]: I1126 14:27:11.247087 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:27:11 crc kubenswrapper[5037]: I1126 14:27:11.247928 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 14:27:41 crc kubenswrapper[5037]: I1126 14:27:41.246928 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:27:41 crc kubenswrapper[5037]: I1126 14:27:41.248037 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 14:28:11 crc kubenswrapper[5037]: I1126 14:28:11.236493 5037 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 26 14:28:11 crc kubenswrapper[5037]: I1126 14:28:11.247999 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:28:11 crc kubenswrapper[5037]: I1126 14:28:11.248115 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 14:28:11 crc kubenswrapper[5037]: I1126 14:28:11.248193 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d"
Nov 26 14:28:11 crc kubenswrapper[5037]: I1126 14:28:11.249096 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"349f155942970ffc9212c9698596c47d1a0002439affbcd295ff8f4a649f33b4"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 14:28:11 crc kubenswrapper[5037]: I1126 14:28:11.249192 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://349f155942970ffc9212c9698596c47d1a0002439affbcd295ff8f4a649f33b4" gracePeriod=600
Nov 26 14:28:11 crc kubenswrapper[5037]: I1126 14:28:11.992394 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="349f155942970ffc9212c9698596c47d1a0002439affbcd295ff8f4a649f33b4" exitCode=0
Nov 26 14:28:11 crc kubenswrapper[5037]: I1126 14:28:11.992450 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"349f155942970ffc9212c9698596c47d1a0002439affbcd295ff8f4a649f33b4"}
Nov 26 14:28:11 crc kubenswrapper[5037]: I1126 14:28:11.992823 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"ff44b46a3dc466f256d3b6fac132034130b6623577a5e3d570d1982ec2c3ae66"}
Nov 26 14:28:11 crc kubenswrapper[5037]: I1126 14:28:11.992848 5037 scope.go:117] "RemoveContainer" containerID="b7afa716ab555c514aa4b783f55103f0b795f534b642704349668ad1f4f2718c"
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.073579 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nzbd4"]
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.076589 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.090264 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nzbd4"]
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.248638 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8m2d\" (UniqueName: \"kubernetes.io/projected/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-kube-api-access-l8m2d\") pod \"redhat-operators-nzbd4\" (UID: \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\") " pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.248719 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-catalog-content\") pod \"redhat-operators-nzbd4\" (UID: \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\") " pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.248823 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-utilities\") pod \"redhat-operators-nzbd4\" (UID: \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\") " pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.350493 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-utilities\") pod \"redhat-operators-nzbd4\" (UID: \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\") " pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.350573 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8m2d\" (UniqueName: \"kubernetes.io/projected/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-kube-api-access-l8m2d\") pod \"redhat-operators-nzbd4\" (UID: \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\") " pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.350614 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-catalog-content\") pod \"redhat-operators-nzbd4\" (UID: \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\") " pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.351106 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-utilities\") pod \"redhat-operators-nzbd4\" (UID: \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\") " pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.351204 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-catalog-content\") pod \"redhat-operators-nzbd4\" (UID: \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\") " pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.377578 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8m2d\" (UniqueName: \"kubernetes.io/projected/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-kube-api-access-l8m2d\") pod \"redhat-operators-nzbd4\" (UID: \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\") " pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.410351 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:42 crc kubenswrapper[5037]: I1126 14:28:42.838111 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nzbd4"]
Nov 26 14:28:43 crc kubenswrapper[5037]: I1126 14:28:43.191813 5037 generic.go:334] "Generic (PLEG): container finished" podID="e7e4e130-fda2-45b2-b399-b2aefb8c6be7" containerID="7967850533de33104deff520eae5c41491f54f5d885336a3218d43dc625bdf93" exitCode=0
Nov 26 14:28:43 crc kubenswrapper[5037]: I1126 14:28:43.191883 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzbd4" event={"ID":"e7e4e130-fda2-45b2-b399-b2aefb8c6be7","Type":"ContainerDied","Data":"7967850533de33104deff520eae5c41491f54f5d885336a3218d43dc625bdf93"}
Nov 26 14:28:43 crc kubenswrapper[5037]: I1126 14:28:43.192141 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzbd4" event={"ID":"e7e4e130-fda2-45b2-b399-b2aefb8c6be7","Type":"ContainerStarted","Data":"4dce5cb18ad61f0ea4b25a681b97d0c155c08f9a0ddd215a98e2a3aa17d4fe52"}
Nov 26 14:28:44 crc kubenswrapper[5037]: I1126 14:28:44.201221 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzbd4" event={"ID":"e7e4e130-fda2-45b2-b399-b2aefb8c6be7","Type":"ContainerStarted","Data":"14f117ec2d9e6899bbbbc0a843ad051af40e93dd8a11557cf83f9ebb3f451f61"}
Nov 26 14:28:45 crc kubenswrapper[5037]: I1126 14:28:45.210321 5037 generic.go:334] "Generic (PLEG): container finished" podID="e7e4e130-fda2-45b2-b399-b2aefb8c6be7" containerID="14f117ec2d9e6899bbbbc0a843ad051af40e93dd8a11557cf83f9ebb3f451f61" exitCode=0
Nov 26 14:28:45 crc kubenswrapper[5037]: I1126 14:28:45.210398 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzbd4" event={"ID":"e7e4e130-fda2-45b2-b399-b2aefb8c6be7","Type":"ContainerDied","Data":"14f117ec2d9e6899bbbbc0a843ad051af40e93dd8a11557cf83f9ebb3f451f61"}
Nov 26 14:28:46 crc kubenswrapper[5037]: I1126 14:28:46.219444 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzbd4" event={"ID":"e7e4e130-fda2-45b2-b399-b2aefb8c6be7","Type":"ContainerStarted","Data":"587339f4f17890bd3248303241ac5fb0ef6f8006b35bc9557c71c548dbcc614c"}
Nov 26 14:28:46 crc kubenswrapper[5037]: I1126 14:28:46.240882 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nzbd4" podStartSLOduration=1.7279808 podStartE2EDuration="4.240860683s" podCreationTimestamp="2025-11-26 14:28:42 +0000 UTC" firstStartedPulling="2025-11-26 14:28:43.193590654 +0000 UTC m=+789.990360838" lastFinishedPulling="2025-11-26 14:28:45.706470537 +0000 UTC m=+792.503240721" observedRunningTime="2025-11-26 14:28:46.238466744 +0000 UTC m=+793.035236948" watchObservedRunningTime="2025-11-26 14:28:46.240860683 +0000 UTC m=+793.037630867"
Nov 26 14:28:52 crc kubenswrapper[5037]: I1126 14:28:52.410860 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:52 crc kubenswrapper[5037]: I1126 14:28:52.410950 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:52 crc kubenswrapper[5037]: I1126 14:28:52.481627 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:53 crc kubenswrapper[5037]: I1126 14:28:53.333586 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:53 crc kubenswrapper[5037]: I1126 14:28:53.371593 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nzbd4"]
Nov 26 14:28:55 crc kubenswrapper[5037]: I1126 14:28:55.279199 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nzbd4" podUID="e7e4e130-fda2-45b2-b399-b2aefb8c6be7" containerName="registry-server" containerID="cri-o://587339f4f17890bd3248303241ac5fb0ef6f8006b35bc9557c71c548dbcc614c" gracePeriod=2
Nov 26 14:28:56 crc kubenswrapper[5037]: I1126 14:28:56.289051 5037 generic.go:334] "Generic (PLEG): container finished" podID="e7e4e130-fda2-45b2-b399-b2aefb8c6be7" containerID="587339f4f17890bd3248303241ac5fb0ef6f8006b35bc9557c71c548dbcc614c" exitCode=0
Nov 26 14:28:56 crc kubenswrapper[5037]: I1126 14:28:56.289169 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzbd4" event={"ID":"e7e4e130-fda2-45b2-b399-b2aefb8c6be7","Type":"ContainerDied","Data":"587339f4f17890bd3248303241ac5fb0ef6f8006b35bc9557c71c548dbcc614c"}
Nov 26 14:28:56 crc kubenswrapper[5037]: I1126 14:28:56.646797 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:56 crc kubenswrapper[5037]: I1126 14:28:56.755853 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8m2d\" (UniqueName: \"kubernetes.io/projected/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-kube-api-access-l8m2d\") pod \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\" (UID: \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\") "
Nov 26 14:28:56 crc kubenswrapper[5037]: I1126 14:28:56.755932 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-catalog-content\") pod \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\" (UID: \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\") "
Nov 26 14:28:56 crc kubenswrapper[5037]: I1126 14:28:56.755984 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-utilities\") pod \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\" (UID: \"e7e4e130-fda2-45b2-b399-b2aefb8c6be7\") "
Nov 26 14:28:56 crc kubenswrapper[5037]: I1126 14:28:56.758103 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-utilities" (OuterVolumeSpecName: "utilities") pod "e7e4e130-fda2-45b2-b399-b2aefb8c6be7" (UID: "e7e4e130-fda2-45b2-b399-b2aefb8c6be7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:28:56 crc kubenswrapper[5037]: I1126 14:28:56.761819 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-kube-api-access-l8m2d" (OuterVolumeSpecName: "kube-api-access-l8m2d") pod "e7e4e130-fda2-45b2-b399-b2aefb8c6be7" (UID: "e7e4e130-fda2-45b2-b399-b2aefb8c6be7"). InnerVolumeSpecName "kube-api-access-l8m2d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:28:56 crc kubenswrapper[5037]: I1126 14:28:56.857579 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8m2d\" (UniqueName: \"kubernetes.io/projected/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-kube-api-access-l8m2d\") on node \"crc\" DevicePath \"\""
Nov 26 14:28:56 crc kubenswrapper[5037]: I1126 14:28:56.857617 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 14:28:56 crc kubenswrapper[5037]: I1126 14:28:56.911191 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e7e4e130-fda2-45b2-b399-b2aefb8c6be7" (UID: "e7e4e130-fda2-45b2-b399-b2aefb8c6be7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:28:56 crc kubenswrapper[5037]: I1126 14:28:56.958947 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7e4e130-fda2-45b2-b399-b2aefb8c6be7-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 14:28:57 crc kubenswrapper[5037]: I1126 14:28:57.299789 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzbd4" event={"ID":"e7e4e130-fda2-45b2-b399-b2aefb8c6be7","Type":"ContainerDied","Data":"4dce5cb18ad61f0ea4b25a681b97d0c155c08f9a0ddd215a98e2a3aa17d4fe52"}
Nov 26 14:28:57 crc kubenswrapper[5037]: I1126 14:28:57.299904 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nzbd4"
Nov 26 14:28:57 crc kubenswrapper[5037]: I1126 14:28:57.300997 5037 scope.go:117] "RemoveContainer" containerID="587339f4f17890bd3248303241ac5fb0ef6f8006b35bc9557c71c548dbcc614c"
Nov 26 14:28:57 crc kubenswrapper[5037]: I1126 14:28:57.344790 5037 scope.go:117] "RemoveContainer" containerID="14f117ec2d9e6899bbbbc0a843ad051af40e93dd8a11557cf83f9ebb3f451f61"
Nov 26 14:28:57 crc kubenswrapper[5037]: I1126 14:28:57.349152 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nzbd4"]
Nov 26 14:28:57 crc kubenswrapper[5037]: I1126 14:28:57.356493 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nzbd4"]
Nov 26 14:28:57 crc kubenswrapper[5037]: I1126 14:28:57.396010 5037 scope.go:117] "RemoveContainer" containerID="7967850533de33104deff520eae5c41491f54f5d885336a3218d43dc625bdf93"
Nov 26 14:28:57 crc kubenswrapper[5037]: I1126 14:28:57.917818 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e4e130-fda2-45b2-b399-b2aefb8c6be7" path="/var/lib/kubelet/pods/e7e4e130-fda2-45b2-b399-b2aefb8c6be7/volumes"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.215720 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"]
Nov 26 14:30:00 crc kubenswrapper[5037]: E1126 14:30:00.216977 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7e4e130-fda2-45b2-b399-b2aefb8c6be7" containerName="registry-server"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.216994 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7e4e130-fda2-45b2-b399-b2aefb8c6be7" containerName="registry-server"
Nov 26 14:30:00 crc kubenswrapper[5037]: E1126 14:30:00.217010 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7e4e130-fda2-45b2-b399-b2aefb8c6be7" containerName="extract-utilities"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.217019 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7e4e130-fda2-45b2-b399-b2aefb8c6be7" containerName="extract-utilities"
Nov 26 14:30:00 crc kubenswrapper[5037]: E1126 14:30:00.217043 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7e4e130-fda2-45b2-b399-b2aefb8c6be7" containerName="extract-content"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.217050 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7e4e130-fda2-45b2-b399-b2aefb8c6be7" containerName="extract-content"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.217146 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7e4e130-fda2-45b2-b399-b2aefb8c6be7" containerName="registry-server"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.217700 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.220993 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.221198 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.228499 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"]
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.351023 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-secret-volume\") pod \"collect-profiles-29402790-rrd4r\" (UID: \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.351303 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dztk\" (UniqueName: \"kubernetes.io/projected/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-kube-api-access-5dztk\") pod \"collect-profiles-29402790-rrd4r\" (UID: \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.351381 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-config-volume\") pod \"collect-profiles-29402790-rrd4r\" (UID: \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.453093 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dztk\" (UniqueName: \"kubernetes.io/projected/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-kube-api-access-5dztk\") pod \"collect-profiles-29402790-rrd4r\" (UID: \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.453176 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-config-volume\") pod \"collect-profiles-29402790-rrd4r\" (UID: \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.453210 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-secret-volume\") pod \"collect-profiles-29402790-rrd4r\" (UID: \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.454511 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-config-volume\") pod \"collect-profiles-29402790-rrd4r\" (UID: \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.461269 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-secret-volume\") pod \"collect-profiles-29402790-rrd4r\" (UID: \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.470460 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dztk\" (UniqueName: \"kubernetes.io/projected/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-kube-api-access-5dztk\") pod \"collect-profiles-29402790-rrd4r\" (UID: \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.543493 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"
Nov 26 14:30:00 crc kubenswrapper[5037]: I1126 14:30:00.751861 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"]
Nov 26 14:30:01 crc kubenswrapper[5037]: I1126 14:30:01.001188 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r" event={"ID":"bed440b8-34d8-4d85-8bb3-b682c60cbdfd","Type":"ContainerStarted","Data":"e2d233c185d2f968ac464980d14f124c2ab4b45b5506f10ff0ed7b3e34866798"}
Nov 26 14:30:02 crc kubenswrapper[5037]: I1126 14:30:02.007913 5037 generic.go:334] "Generic (PLEG): container finished" podID="bed440b8-34d8-4d85-8bb3-b682c60cbdfd" containerID="aa79fb2ceb2bfd00b0002638a4d8f0c4008de74f954f2d69de297cb3f6ec14f3" exitCode=0
Nov 26 14:30:02 crc kubenswrapper[5037]: I1126 14:30:02.008075 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r" event={"ID":"bed440b8-34d8-4d85-8bb3-b682c60cbdfd","Type":"ContainerDied","Data":"aa79fb2ceb2bfd00b0002638a4d8f0c4008de74f954f2d69de297cb3f6ec14f3"}
Nov 26 14:30:03 crc kubenswrapper[5037]: I1126 14:30:03.281264 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"
Nov 26 14:30:03 crc kubenswrapper[5037]: I1126 14:30:03.298522 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-config-volume\") pod \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\" (UID: \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\") "
Nov 26 14:30:03 crc kubenswrapper[5037]: I1126 14:30:03.298599 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-secret-volume\") pod \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\" (UID: \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\") "
Nov 26 14:30:03 crc kubenswrapper[5037]: I1126 14:30:03.298634 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5dztk\" (UniqueName: \"kubernetes.io/projected/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-kube-api-access-5dztk\") pod \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\" (UID: \"bed440b8-34d8-4d85-8bb3-b682c60cbdfd\") "
Nov 26 14:30:03 crc kubenswrapper[5037]: I1126 14:30:03.299758 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-config-volume" (OuterVolumeSpecName: "config-volume") pod "bed440b8-34d8-4d85-8bb3-b682c60cbdfd" (UID: "bed440b8-34d8-4d85-8bb3-b682c60cbdfd"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:30:03 crc kubenswrapper[5037]: I1126 14:30:03.306802 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-kube-api-access-5dztk" (OuterVolumeSpecName: "kube-api-access-5dztk") pod "bed440b8-34d8-4d85-8bb3-b682c60cbdfd" (UID: "bed440b8-34d8-4d85-8bb3-b682c60cbdfd"). InnerVolumeSpecName "kube-api-access-5dztk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:30:03 crc kubenswrapper[5037]: I1126 14:30:03.307955 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "bed440b8-34d8-4d85-8bb3-b682c60cbdfd" (UID: "bed440b8-34d8-4d85-8bb3-b682c60cbdfd"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:30:03 crc kubenswrapper[5037]: I1126 14:30:03.401154 5037 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-config-volume\") on node \"crc\" DevicePath \"\""
Nov 26 14:30:03 crc kubenswrapper[5037]: I1126 14:30:03.401219 5037 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 26 14:30:03 crc kubenswrapper[5037]: I1126 14:30:03.401240 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5dztk\" (UniqueName: \"kubernetes.io/projected/bed440b8-34d8-4d85-8bb3-b682c60cbdfd-kube-api-access-5dztk\") on node \"crc\" DevicePath \"\""
Nov 26 14:30:04 crc kubenswrapper[5037]: I1126 14:30:04.021537 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r" event={"ID":"bed440b8-34d8-4d85-8bb3-b682c60cbdfd","Type":"ContainerDied","Data":"e2d233c185d2f968ac464980d14f124c2ab4b45b5506f10ff0ed7b3e34866798"}
Nov 26 14:30:04 crc kubenswrapper[5037]: I1126 14:30:04.021981 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2d233c185d2f968ac464980d14f124c2ab4b45b5506f10ff0ed7b3e34866798"
Nov 26 14:30:04 crc kubenswrapper[5037]: I1126 14:30:04.021666 5037 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r" Nov 26 14:30:11 crc kubenswrapper[5037]: I1126 14:30:11.246997 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:30:11 crc kubenswrapper[5037]: I1126 14:30:11.248051 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:30:41 crc kubenswrapper[5037]: I1126 14:30:41.246831 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:30:41 crc kubenswrapper[5037]: I1126 14:30:41.247563 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:31:11 crc kubenswrapper[5037]: I1126 14:31:11.247834 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:31:11 crc kubenswrapper[5037]: I1126 14:31:11.248606 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:31:11 crc kubenswrapper[5037]: I1126 14:31:11.248864 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:31:11 crc kubenswrapper[5037]: I1126 14:31:11.249949 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ff44b46a3dc466f256d3b6fac132034130b6623577a5e3d570d1982ec2c3ae66"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 14:31:11 crc kubenswrapper[5037]: I1126 14:31:11.250053 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://ff44b46a3dc466f256d3b6fac132034130b6623577a5e3d570d1982ec2c3ae66" gracePeriod=600 Nov 26 14:31:11 crc kubenswrapper[5037]: I1126 14:31:11.463632 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" 
containerID="ff44b46a3dc466f256d3b6fac132034130b6623577a5e3d570d1982ec2c3ae66" exitCode=0 Nov 26 14:31:11 crc kubenswrapper[5037]: I1126 14:31:11.463787 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"ff44b46a3dc466f256d3b6fac132034130b6623577a5e3d570d1982ec2c3ae66"} Nov 26 14:31:11 crc kubenswrapper[5037]: I1126 14:31:11.464068 5037 scope.go:117] "RemoveContainer" containerID="349f155942970ffc9212c9698596c47d1a0002439affbcd295ff8f4a649f33b4" Nov 26 14:31:12 crc kubenswrapper[5037]: I1126 14:31:12.472361 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"36a7c42fc7524fe0b2d1a2075eae30f52f037f2969e9db7800448ccd49cfcc57"} Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.164960 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fdhhj"] Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.166994 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovn-controller" containerID="cri-o://feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4" gracePeriod=30 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.167039 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="nbdb" containerID="cri-o://d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c" gracePeriod=30 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.167163 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovn-acl-logging" containerID="cri-o://bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4" gracePeriod=30 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.167189 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="northd" containerID="cri-o://86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897" gracePeriod=30 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.167237 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68" gracePeriod=30 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.167141 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="sbdb" containerID="cri-o://ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2" gracePeriod=30 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.167152 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" 
containerName="kube-rbac-proxy-node" containerID="cri-o://31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4" gracePeriod=30 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.198684 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" containerID="cri-o://34de66ca7844b823c4e7913ac3c3ba14d0b8652a0741d3072c1cd913f6f68d6c" gracePeriod=30 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.741134 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovnkube-controller/3.log" Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.743638 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovn-acl-logging/0.log" Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744199 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovn-controller/0.log" Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744658 5037 generic.go:334] "Generic (PLEG): container finished" podID="454ee6da-70e5-4d30-89e5-19a35123a278" containerID="34de66ca7844b823c4e7913ac3c3ba14d0b8652a0741d3072c1cd913f6f68d6c" exitCode=0 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744687 5037 generic.go:334] "Generic (PLEG): container finished" podID="454ee6da-70e5-4d30-89e5-19a35123a278" containerID="ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2" exitCode=0 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744697 5037 generic.go:334] "Generic (PLEG): container finished" podID="454ee6da-70e5-4d30-89e5-19a35123a278" containerID="d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c" exitCode=0 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744706 5037 generic.go:334] "Generic (PLEG): container finished" podID="454ee6da-70e5-4d30-89e5-19a35123a278" containerID="86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897" exitCode=0 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744716 5037 generic.go:334] "Generic (PLEG): container finished" podID="454ee6da-70e5-4d30-89e5-19a35123a278" containerID="306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68" exitCode=0 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744726 5037 generic.go:334] "Generic (PLEG): container finished" podID="454ee6da-70e5-4d30-89e5-19a35123a278" containerID="31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4" exitCode=0 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744732 5037 generic.go:334] "Generic (PLEG): container finished" podID="454ee6da-70e5-4d30-89e5-19a35123a278" containerID="bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4" exitCode=143 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744740 5037 generic.go:334] "Generic (PLEG): container finished" podID="454ee6da-70e5-4d30-89e5-19a35123a278" containerID="feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4" exitCode=143 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744690 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" 
event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"34de66ca7844b823c4e7913ac3c3ba14d0b8652a0741d3072c1cd913f6f68d6c"} Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744803 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2"} Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744815 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c"} Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744826 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897"} Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744835 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68"} Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744847 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4"} Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744859 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4"} Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744870 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4"} Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.744887 5037 scope.go:117] "RemoveContainer" containerID="7be4af5975c1ad5d347b761c03e870cfdbd3b774e45f13f59fc0af4bedb3772c" Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.750864 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-lxpjp_490e7d88-ae7f-45f9-ab12-598c33e3bc69/kube-multus/2.log" Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.751798 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-lxpjp_490e7d88-ae7f-45f9-ab12-598c33e3bc69/kube-multus/1.log" Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.751871 5037 generic.go:334] "Generic (PLEG): container finished" podID="490e7d88-ae7f-45f9-ab12-598c33e3bc69" containerID="d00fe2156839598797b806c7acdd6afda48f3c21d5efc19c29a33e6605c33e2a" exitCode=2 Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.751923 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-lxpjp" event={"ID":"490e7d88-ae7f-45f9-ab12-598c33e3bc69","Type":"ContainerDied","Data":"d00fe2156839598797b806c7acdd6afda48f3c21d5efc19c29a33e6605c33e2a"} Nov 26 14:31:53 crc 
kubenswrapper[5037]: I1126 14:31:53.752684 5037 scope.go:117] "RemoveContainer" containerID="d00fe2156839598797b806c7acdd6afda48f3c21d5efc19c29a33e6605c33e2a" Nov 26 14:31:53 crc kubenswrapper[5037]: I1126 14:31:53.781342 5037 scope.go:117] "RemoveContainer" containerID="58232632cfc8ddcd9e524acaf4b195314aeed89c0c6f892596b6020a82de4d38" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.112847 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovn-acl-logging/0.log" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.113833 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovn-controller/0.log" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.114318 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184246 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-9v68f"] Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.184543 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="nbdb" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184559 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="nbdb" Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.184573 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="sbdb" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184581 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="sbdb" Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.184593 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184602 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.184612 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="kubecfg-setup" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184619 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="kubecfg-setup" Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.184629 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="northd" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184636 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="northd" Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.184645 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184653 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.184662 
5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovn-acl-logging" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184669 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovn-acl-logging" Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.184678 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184686 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.184700 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184709 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.184720 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184727 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.184740 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bed440b8-34d8-4d85-8bb3-b682c60cbdfd" containerName="collect-profiles" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184748 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="bed440b8-34d8-4d85-8bb3-b682c60cbdfd" containerName="collect-profiles" Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.184760 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="kube-rbac-proxy-node" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184768 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="kube-rbac-proxy-node" Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.184779 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovn-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184787 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovn-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184892 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="northd" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184904 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovn-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184916 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="nbdb" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184926 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" 
containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184934 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184943 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="sbdb" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184956 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="kube-rbac-proxy-node" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184968 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184976 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184985 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovn-acl-logging" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.184993 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.185001 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="bed440b8-34d8-4d85-8bb3-b682c60cbdfd" containerName="collect-profiles" Nov 26 14:31:54 crc kubenswrapper[5037]: E1126 14:31:54.185102 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.185112 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.185216 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" containerName="ovnkube-controller" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.187158 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.208985 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/454ee6da-70e5-4d30-89e5-19a35123a278-ovn-node-metrics-cert\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209101 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-etc-openvswitch\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209174 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-run-ovn-kubernetes\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209196 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-cni-netd\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209230 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-ovnkube-config\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209254 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-openvswitch\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209248 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209310 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-env-overrides\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209420 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "host-cni-netd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209446 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-ovnkube-script-lib\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209631 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-cni-bin\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209682 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-systemd-units\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209727 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-systemd\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209784 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-var-lib-cni-networks-ovn-kubernetes\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209845 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-ovn\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209874 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-run-netns\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209896 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-log-socket\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209917 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-kubelet\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209964 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-var-lib-openvswitch\") pod 
\"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.209964 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210001 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhgm2\" (UniqueName: \"kubernetes.io/projected/454ee6da-70e5-4d30-89e5-19a35123a278-kube-api-access-mhgm2\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210027 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-node-log\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210053 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-slash\") pod \"454ee6da-70e5-4d30-89e5-19a35123a278\" (UID: \"454ee6da-70e5-4d30-89e5-19a35123a278\") " Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210055 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210118 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210162 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210204 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210246 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210329 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-slash" (OuterVolumeSpecName: "host-slash") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210358 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-log-socket" (OuterVolumeSpecName: "log-socket") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210397 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210426 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210438 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210462 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210485 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). 
InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210527 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-node-log" (OuterVolumeSpecName: "node-log") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210575 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210805 5037 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210843 5037 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210863 5037 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210910 5037 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210928 5037 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210946 5037 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210966 5037 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/454ee6da-70e5-4d30-89e5-19a35123a278-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210982 5037 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.210998 5037 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.211016 5037 reconciler_common.go:293] "Volume detached for volume 
\"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.211033 5037 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.211049 5037 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.211065 5037 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-log-socket\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.211080 5037 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.211098 5037 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.211114 5037 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-node-log\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.211129 5037 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-host-slash\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.217841 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/454ee6da-70e5-4d30-89e5-19a35123a278-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.218967 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/454ee6da-70e5-4d30-89e5-19a35123a278-kube-api-access-mhgm2" (OuterVolumeSpecName: "kube-api-access-mhgm2") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "kube-api-access-mhgm2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.228597 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "454ee6da-70e5-4d30-89e5-19a35123a278" (UID: "454ee6da-70e5-4d30-89e5-19a35123a278"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312109 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-run-systemd\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312166 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-log-socket\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312197 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312220 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d7a5a3a2-64d8-4679-864e-827702cda894-ovnkube-script-lib\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312246 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-run-ovn-kubernetes\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312270 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-run-openvswitch\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312324 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d7a5a3a2-64d8-4679-864e-827702cda894-ovnkube-config\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312377 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x75t2\" (UniqueName: \"kubernetes.io/projected/d7a5a3a2-64d8-4679-864e-827702cda894-kube-api-access-x75t2\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312408 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-cni-bin\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312442 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-cni-netd\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312475 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-slash\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312496 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-etc-openvswitch\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312525 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-node-log\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312541 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d7a5a3a2-64d8-4679-864e-827702cda894-env-overrides\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312559 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-kubelet\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312579 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d7a5a3a2-64d8-4679-864e-827702cda894-ovn-node-metrics-cert\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312595 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-systemd-units\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312667 5037 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-run-netns\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312695 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-var-lib-openvswitch\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312726 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-run-ovn\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312775 5037 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/454ee6da-70e5-4d30-89e5-19a35123a278-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312787 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhgm2\" (UniqueName: \"kubernetes.io/projected/454ee6da-70e5-4d30-89e5-19a35123a278-kube-api-access-mhgm2\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.312799 5037 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/454ee6da-70e5-4d30-89e5-19a35123a278-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.414682 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-run-systemd\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.414768 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-log-socket\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.414791 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.414817 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d7a5a3a2-64d8-4679-864e-827702cda894-ovnkube-script-lib\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc 
kubenswrapper[5037]: I1126 14:31:54.414837 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-run-ovn-kubernetes\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.414852 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-run-openvswitch\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.414870 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d7a5a3a2-64d8-4679-864e-827702cda894-ovnkube-config\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.414895 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x75t2\" (UniqueName: \"kubernetes.io/projected/d7a5a3a2-64d8-4679-864e-827702cda894-kube-api-access-x75t2\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.414894 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-run-systemd\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.414947 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.414979 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-cni-bin\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.414974 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-run-ovn-kubernetes\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.414912 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-cni-bin\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 
14:31:54.415004 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-run-openvswitch\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.414907 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-log-socket\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415180 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-cni-netd\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415213 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-slash\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415251 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-slash\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415256 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-etc-openvswitch\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415428 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d7a5a3a2-64d8-4679-864e-827702cda894-env-overrides\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415452 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-node-log\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415470 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-kubelet\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415496 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-systemd-units\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415512 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d7a5a3a2-64d8-4679-864e-827702cda894-ovn-node-metrics-cert\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415537 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-run-netns\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415559 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-var-lib-openvswitch\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415598 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-run-ovn\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415664 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-run-ovn\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415245 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-cni-netd\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.415299 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-etc-openvswitch\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.416334 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-var-lib-openvswitch\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.416337 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-run-netns\") pod \"ovnkube-node-9v68f\" (UID: 
\"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.416310 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-host-kubelet\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.416265 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-node-log\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.416271 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d7a5a3a2-64d8-4679-864e-827702cda894-systemd-units\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.416759 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d7a5a3a2-64d8-4679-864e-827702cda894-ovnkube-config\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.417155 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d7a5a3a2-64d8-4679-864e-827702cda894-env-overrides\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.417204 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d7a5a3a2-64d8-4679-864e-827702cda894-ovnkube-script-lib\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.425651 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d7a5a3a2-64d8-4679-864e-827702cda894-ovn-node-metrics-cert\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.434521 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x75t2\" (UniqueName: \"kubernetes.io/projected/d7a5a3a2-64d8-4679-864e-827702cda894-kube-api-access-x75t2\") pod \"ovnkube-node-9v68f\" (UID: \"d7a5a3a2-64d8-4679-864e-827702cda894\") " pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.504247 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:31:54 crc kubenswrapper[5037]: W1126 14:31:54.526477 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd7a5a3a2_64d8_4679_864e_827702cda894.slice/crio-d1e26c45f8484c13b598a8c070133f493021cddda772a6f31503575071ef117d WatchSource:0}: Error finding container d1e26c45f8484c13b598a8c070133f493021cddda772a6f31503575071ef117d: Status 404 returned error can't find the container with id d1e26c45f8484c13b598a8c070133f493021cddda772a6f31503575071ef117d Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.759621 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-lxpjp_490e7d88-ae7f-45f9-ab12-598c33e3bc69/kube-multus/2.log" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.759726 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-lxpjp" event={"ID":"490e7d88-ae7f-45f9-ab12-598c33e3bc69","Type":"ContainerStarted","Data":"9bf58acf35311a19f4746450b656d2206b1049eab6287be2a8886755b68ecd78"} Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.762325 5037 generic.go:334] "Generic (PLEG): container finished" podID="d7a5a3a2-64d8-4679-864e-827702cda894" containerID="3e391901df18bfdfd28bbf0fcbe06a7fd24022b9af6c657dff42369707f16ae5" exitCode=0 Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.762384 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" event={"ID":"d7a5a3a2-64d8-4679-864e-827702cda894","Type":"ContainerDied","Data":"3e391901df18bfdfd28bbf0fcbe06a7fd24022b9af6c657dff42369707f16ae5"} Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.762463 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" event={"ID":"d7a5a3a2-64d8-4679-864e-827702cda894","Type":"ContainerStarted","Data":"d1e26c45f8484c13b598a8c070133f493021cddda772a6f31503575071ef117d"} Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.767639 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovn-acl-logging/0.log" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.768187 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fdhhj_454ee6da-70e5-4d30-89e5-19a35123a278/ovn-controller/0.log" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.769032 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" event={"ID":"454ee6da-70e5-4d30-89e5-19a35123a278","Type":"ContainerDied","Data":"81c9fc75923a803479b6e97e591997f55110abcaac1efa759598b5d9a4677f84"} Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.769101 5037 scope.go:117] "RemoveContainer" containerID="34de66ca7844b823c4e7913ac3c3ba14d0b8652a0741d3072c1cd913f6f68d6c" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.769585 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fdhhj" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.803639 5037 scope.go:117] "RemoveContainer" containerID="ae3b5707990abcd8005bb71376bda6e0f62c32c806b11c5db27e0e06e5ca90c2" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.826218 5037 scope.go:117] "RemoveContainer" containerID="d0319889506261e48b8db06cd292ba17fb46399b0b2063670c5c0e179a801f9c" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.844618 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fdhhj"] Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.848239 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fdhhj"] Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.857078 5037 scope.go:117] "RemoveContainer" containerID="86a35a51d679468b21fdd174d7148d46f8c1acddbae627ed5c27b61aa399b897" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.875960 5037 scope.go:117] "RemoveContainer" containerID="306a3ae23bf504e98f4e7be45cebf984a5dbf47fda9720237c881cf65de43b68" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.895866 5037 scope.go:117] "RemoveContainer" containerID="31f1e8bfa4deb76c13528d9aa2414c14ba6cc0e4637f2cf84c153398b360cad4" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.916539 5037 scope.go:117] "RemoveContainer" containerID="bf3e3bb0b0e0730b9bbd45aad381d5f38940c4a36676db5e9264ccb473f173f4" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.937756 5037 scope.go:117] "RemoveContainer" containerID="feebe91c810ee2c7c5f9aefe54887ecbc31a89a83a03ac6bbac7f373e15752e4" Nov 26 14:31:54 crc kubenswrapper[5037]: I1126 14:31:54.958994 5037 scope.go:117] "RemoveContainer" containerID="fc52d745c6003ec4f3ec6324cb94f6a59844deb4d4ff67c6a98f8713bc801f58" Nov 26 14:31:55 crc kubenswrapper[5037]: I1126 14:31:55.781497 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" event={"ID":"d7a5a3a2-64d8-4679-864e-827702cda894","Type":"ContainerStarted","Data":"66f02bfd4cc94d8e712166ce2c65448dfdebf430e260115fdc427089186da57a"} Nov 26 14:31:55 crc kubenswrapper[5037]: I1126 14:31:55.781994 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" event={"ID":"d7a5a3a2-64d8-4679-864e-827702cda894","Type":"ContainerStarted","Data":"82ab49d5ed4ab9e5d396c82af4ddd1af8c120707ead5f46d00f6ac6dfc4a1a05"} Nov 26 14:31:55 crc kubenswrapper[5037]: I1126 14:31:55.782012 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" event={"ID":"d7a5a3a2-64d8-4679-864e-827702cda894","Type":"ContainerStarted","Data":"53438b6dc94b5f93d0711cd9c63328774051944ba1b95925f2c01e94780facb5"} Nov 26 14:31:55 crc kubenswrapper[5037]: I1126 14:31:55.782020 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" event={"ID":"d7a5a3a2-64d8-4679-864e-827702cda894","Type":"ContainerStarted","Data":"2212c3ff18100bfe967fff937025fcc5c53577b53b7c506e65b7d5ab8ca4f7b8"} Nov 26 14:31:55 crc kubenswrapper[5037]: I1126 14:31:55.782029 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" event={"ID":"d7a5a3a2-64d8-4679-864e-827702cda894","Type":"ContainerStarted","Data":"be31775e02f291690fe71561cecf2a9a99627b05780030ca6a51377bf1d31331"} Nov 26 14:31:55 crc kubenswrapper[5037]: I1126 14:31:55.782038 5037 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" event={"ID":"d7a5a3a2-64d8-4679-864e-827702cda894","Type":"ContainerStarted","Data":"93880640f418932ba798d3d0bcd06d1c8de0b48498d8160eb31c9654ca0bb73a"} Nov 26 14:31:55 crc kubenswrapper[5037]: I1126 14:31:55.918232 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="454ee6da-70e5-4d30-89e5-19a35123a278" path="/var/lib/kubelet/pods/454ee6da-70e5-4d30-89e5-19a35123a278/volumes" Nov 26 14:31:58 crc kubenswrapper[5037]: I1126 14:31:58.813047 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" event={"ID":"d7a5a3a2-64d8-4679-864e-827702cda894","Type":"ContainerStarted","Data":"5579cf7f407f7052727838af879d9a69b3736f367d02c2b6cf9b658ef70dbd92"} Nov 26 14:31:59 crc kubenswrapper[5037]: I1126 14:31:59.879823 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-d8w7h"] Nov 26 14:31:59 crc kubenswrapper[5037]: I1126 14:31:59.881141 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:31:59 crc kubenswrapper[5037]: I1126 14:31:59.885532 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 26 14:31:59 crc kubenswrapper[5037]: I1126 14:31:59.886063 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 26 14:31:59 crc kubenswrapper[5037]: I1126 14:31:59.886270 5037 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-tt6rp" Nov 26 14:31:59 crc kubenswrapper[5037]: I1126 14:31:59.886281 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 26 14:31:59 crc kubenswrapper[5037]: I1126 14:31:59.998362 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-crc-storage\") pod \"crc-storage-crc-d8w7h\" (UID: \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\") " pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:31:59 crc kubenswrapper[5037]: I1126 14:31:59.998597 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jttq7\" (UniqueName: \"kubernetes.io/projected/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-kube-api-access-jttq7\") pod \"crc-storage-crc-d8w7h\" (UID: \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\") " pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:31:59 crc kubenswrapper[5037]: I1126 14:31:59.998658 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-node-mnt\") pod \"crc-storage-crc-d8w7h\" (UID: \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\") " pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:00 crc kubenswrapper[5037]: I1126 14:32:00.100276 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jttq7\" (UniqueName: \"kubernetes.io/projected/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-kube-api-access-jttq7\") pod \"crc-storage-crc-d8w7h\" (UID: \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\") " pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:00 crc kubenswrapper[5037]: I1126 14:32:00.100369 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: 
\"kubernetes.io/host-path/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-node-mnt\") pod \"crc-storage-crc-d8w7h\" (UID: \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\") " pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:00 crc kubenswrapper[5037]: I1126 14:32:00.100427 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-crc-storage\") pod \"crc-storage-crc-d8w7h\" (UID: \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\") " pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:00 crc kubenswrapper[5037]: I1126 14:32:00.100803 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-node-mnt\") pod \"crc-storage-crc-d8w7h\" (UID: \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\") " pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:00 crc kubenswrapper[5037]: I1126 14:32:00.101826 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-crc-storage\") pod \"crc-storage-crc-d8w7h\" (UID: \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\") " pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:00 crc kubenswrapper[5037]: I1126 14:32:00.122153 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jttq7\" (UniqueName: \"kubernetes.io/projected/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-kube-api-access-jttq7\") pod \"crc-storage-crc-d8w7h\" (UID: \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\") " pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:00 crc kubenswrapper[5037]: I1126 14:32:00.276159 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:00 crc kubenswrapper[5037]: E1126 14:32:00.325214 5037 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-d8w7h_crc-storage_2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084_0(fe9252ff6c427168949e4a3a5ac0e5ca033575f894e017473cdae621270a04bf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 14:32:00 crc kubenswrapper[5037]: E1126 14:32:00.325351 5037 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-d8w7h_crc-storage_2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084_0(fe9252ff6c427168949e4a3a5ac0e5ca033575f894e017473cdae621270a04bf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:00 crc kubenswrapper[5037]: E1126 14:32:00.325385 5037 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-d8w7h_crc-storage_2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084_0(fe9252ff6c427168949e4a3a5ac0e5ca033575f894e017473cdae621270a04bf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:00 crc kubenswrapper[5037]: E1126 14:32:00.325457 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-d8w7h_crc-storage(2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-d8w7h_crc-storage(2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-d8w7h_crc-storage_2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084_0(fe9252ff6c427168949e4a3a5ac0e5ca033575f894e017473cdae621270a04bf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-d8w7h" podUID="2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084" Nov 26 14:32:00 crc kubenswrapper[5037]: I1126 14:32:00.831433 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" event={"ID":"d7a5a3a2-64d8-4679-864e-827702cda894","Type":"ContainerStarted","Data":"e43d0da29db3d77dd26b8771b96d3ac58f1a49f9d77f6fd2c91562f12dc24d7e"} Nov 26 14:32:00 crc kubenswrapper[5037]: I1126 14:32:00.831760 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:32:00 crc kubenswrapper[5037]: I1126 14:32:00.862598 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:32:00 crc kubenswrapper[5037]: I1126 14:32:00.864444 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" podStartSLOduration=6.864424917 podStartE2EDuration="6.864424917s" podCreationTimestamp="2025-11-26 14:31:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:32:00.861055906 +0000 UTC m=+987.657826100" watchObservedRunningTime="2025-11-26 14:32:00.864424917 +0000 UTC m=+987.661195101" Nov 26 14:32:01 crc kubenswrapper[5037]: I1126 14:32:01.343202 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-d8w7h"] Nov 26 14:32:01 crc kubenswrapper[5037]: I1126 14:32:01.343310 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:01 crc kubenswrapper[5037]: I1126 14:32:01.343784 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:01 crc kubenswrapper[5037]: E1126 14:32:01.368146 5037 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-d8w7h_crc-storage_2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084_0(d9076e3851e08067037a80decde71fcad54cf1f3eb19939bba65355ed77354ec): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 14:32:01 crc kubenswrapper[5037]: E1126 14:32:01.368205 5037 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-d8w7h_crc-storage_2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084_0(d9076e3851e08067037a80decde71fcad54cf1f3eb19939bba65355ed77354ec): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:01 crc kubenswrapper[5037]: E1126 14:32:01.368230 5037 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-d8w7h_crc-storage_2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084_0(d9076e3851e08067037a80decde71fcad54cf1f3eb19939bba65355ed77354ec): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:01 crc kubenswrapper[5037]: E1126 14:32:01.368278 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-d8w7h_crc-storage(2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-d8w7h_crc-storage(2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-d8w7h_crc-storage_2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084_0(d9076e3851e08067037a80decde71fcad54cf1f3eb19939bba65355ed77354ec): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-d8w7h" podUID="2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084" Nov 26 14:32:01 crc kubenswrapper[5037]: I1126 14:32:01.837651 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:32:01 crc kubenswrapper[5037]: I1126 14:32:01.837711 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:32:01 crc kubenswrapper[5037]: I1126 14:32:01.871946 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:32:11 crc kubenswrapper[5037]: I1126 14:32:11.907918 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:11 crc kubenswrapper[5037]: I1126 14:32:11.909182 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:12 crc kubenswrapper[5037]: I1126 14:32:12.116171 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-d8w7h"] Nov 26 14:32:12 crc kubenswrapper[5037]: I1126 14:32:12.134025 5037 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 14:32:12 crc kubenswrapper[5037]: I1126 14:32:12.911160 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-d8w7h" event={"ID":"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084","Type":"ContainerStarted","Data":"109a1d11745a385c0a5b468384c10db9a249610d820452f512670a389bd61c49"} Nov 26 14:32:13 crc kubenswrapper[5037]: I1126 14:32:13.919862 5037 generic.go:334] "Generic (PLEG): container finished" podID="2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084" containerID="93616966e63bd753972333e303315a0f459b8e3a20a835a562b811bb820af01e" exitCode=0 Nov 26 14:32:13 crc kubenswrapper[5037]: I1126 14:32:13.920144 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-d8w7h" event={"ID":"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084","Type":"ContainerDied","Data":"93616966e63bd753972333e303315a0f459b8e3a20a835a562b811bb820af01e"} Nov 26 14:32:15 crc kubenswrapper[5037]: I1126 14:32:15.135756 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:15 crc kubenswrapper[5037]: I1126 14:32:15.300013 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-node-mnt\") pod \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\" (UID: \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\") " Nov 26 14:32:15 crc kubenswrapper[5037]: I1126 14:32:15.300116 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jttq7\" (UniqueName: \"kubernetes.io/projected/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-kube-api-access-jttq7\") pod \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\" (UID: \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\") " Nov 26 14:32:15 crc kubenswrapper[5037]: I1126 14:32:15.300143 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084" (UID: "2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:32:15 crc kubenswrapper[5037]: I1126 14:32:15.300179 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-crc-storage\") pod \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\" (UID: \"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084\") " Nov 26 14:32:15 crc kubenswrapper[5037]: I1126 14:32:15.300847 5037 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 26 14:32:15 crc kubenswrapper[5037]: I1126 14:32:15.307604 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-kube-api-access-jttq7" (OuterVolumeSpecName: "kube-api-access-jttq7") pod "2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084" (UID: "2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084"). InnerVolumeSpecName "kube-api-access-jttq7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:32:15 crc kubenswrapper[5037]: I1126 14:32:15.313442 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084" (UID: "2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:32:15 crc kubenswrapper[5037]: I1126 14:32:15.401593 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jttq7\" (UniqueName: \"kubernetes.io/projected/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-kube-api-access-jttq7\") on node \"crc\" DevicePath \"\"" Nov 26 14:32:15 crc kubenswrapper[5037]: I1126 14:32:15.401639 5037 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 26 14:32:15 crc kubenswrapper[5037]: I1126 14:32:15.937118 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-d8w7h" event={"ID":"2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084","Type":"ContainerDied","Data":"109a1d11745a385c0a5b468384c10db9a249610d820452f512670a389bd61c49"} Nov 26 14:32:15 crc kubenswrapper[5037]: I1126 14:32:15.937395 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-d8w7h" Nov 26 14:32:15 crc kubenswrapper[5037]: I1126 14:32:15.937413 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="109a1d11745a385c0a5b468384c10db9a249610d820452f512670a389bd61c49" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.439837 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62"] Nov 26 14:32:22 crc kubenswrapper[5037]: E1126 14:32:22.440652 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084" containerName="storage" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.440663 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084" containerName="storage" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.440773 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c220b7d-22bb-4ed9-8ec1-6c5bdb3be084" containerName="storage" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.441489 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.444123 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.453446 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62"] Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.501391 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d819d36f-4bb5-4baf-aa77-76cf0554d458-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62\" (UID: \"d819d36f-4bb5-4baf-aa77-76cf0554d458\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.501496 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d819d36f-4bb5-4baf-aa77-76cf0554d458-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62\" (UID: \"d819d36f-4bb5-4baf-aa77-76cf0554d458\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.501531 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bt9l\" (UniqueName: \"kubernetes.io/projected/d819d36f-4bb5-4baf-aa77-76cf0554d458-kube-api-access-8bt9l\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62\" (UID: \"d819d36f-4bb5-4baf-aa77-76cf0554d458\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.603681 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d819d36f-4bb5-4baf-aa77-76cf0554d458-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62\" (UID: \"d819d36f-4bb5-4baf-aa77-76cf0554d458\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.603719 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bt9l\" (UniqueName: \"kubernetes.io/projected/d819d36f-4bb5-4baf-aa77-76cf0554d458-kube-api-access-8bt9l\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62\" (UID: \"d819d36f-4bb5-4baf-aa77-76cf0554d458\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.603758 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d819d36f-4bb5-4baf-aa77-76cf0554d458-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62\" (UID: \"d819d36f-4bb5-4baf-aa77-76cf0554d458\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.604165 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/d819d36f-4bb5-4baf-aa77-76cf0554d458-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62\" (UID: \"d819d36f-4bb5-4baf-aa77-76cf0554d458\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.604219 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d819d36f-4bb5-4baf-aa77-76cf0554d458-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62\" (UID: \"d819d36f-4bb5-4baf-aa77-76cf0554d458\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.625274 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bt9l\" (UniqueName: \"kubernetes.io/projected/d819d36f-4bb5-4baf-aa77-76cf0554d458-kube-api-access-8bt9l\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62\" (UID: \"d819d36f-4bb5-4baf-aa77-76cf0554d458\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.758865 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.951761 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62"] Nov 26 14:32:22 crc kubenswrapper[5037]: I1126 14:32:22.974351 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" event={"ID":"d819d36f-4bb5-4baf-aa77-76cf0554d458","Type":"ContainerStarted","Data":"2cd3b9213c9a295bc8a1c2bb6afb68f8799b15a47b875846f1ac9e8009028f89"} Nov 26 14:32:23 crc kubenswrapper[5037]: I1126 14:32:23.983321 5037 generic.go:334] "Generic (PLEG): container finished" podID="d819d36f-4bb5-4baf-aa77-76cf0554d458" containerID="ddc0776bb46a9e5c86be68ad4cc9593e88d0954cba526c66f4867dc029da28fa" exitCode=0 Nov 26 14:32:23 crc kubenswrapper[5037]: I1126 14:32:23.984491 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" event={"ID":"d819d36f-4bb5-4baf-aa77-76cf0554d458","Type":"ContainerDied","Data":"ddc0776bb46a9e5c86be68ad4cc9593e88d0954cba526c66f4867dc029da28fa"} Nov 26 14:32:24 crc kubenswrapper[5037]: I1126 14:32:24.528339 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9v68f" Nov 26 14:32:27 crc kubenswrapper[5037]: I1126 14:32:27.004010 5037 generic.go:334] "Generic (PLEG): container finished" podID="d819d36f-4bb5-4baf-aa77-76cf0554d458" containerID="457c4fdc4d41c7ca5d1cfc881c3141f369836c34cf25f046943ff9e353aa18c7" exitCode=0 Nov 26 14:32:27 crc kubenswrapper[5037]: I1126 14:32:27.004073 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" event={"ID":"d819d36f-4bb5-4baf-aa77-76cf0554d458","Type":"ContainerDied","Data":"457c4fdc4d41c7ca5d1cfc881c3141f369836c34cf25f046943ff9e353aa18c7"} Nov 26 14:32:28 crc kubenswrapper[5037]: I1126 14:32:28.012189 5037 generic.go:334] "Generic (PLEG): container finished" 
podID="d819d36f-4bb5-4baf-aa77-76cf0554d458" containerID="018edeb2540efdff7f2e625499daef7cca86f56318d34808f90bc0c298320285" exitCode=0 Nov 26 14:32:28 crc kubenswrapper[5037]: I1126 14:32:28.012273 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" event={"ID":"d819d36f-4bb5-4baf-aa77-76cf0554d458","Type":"ContainerDied","Data":"018edeb2540efdff7f2e625499daef7cca86f56318d34808f90bc0c298320285"} Nov 26 14:32:29 crc kubenswrapper[5037]: I1126 14:32:29.255419 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" Nov 26 14:32:29 crc kubenswrapper[5037]: I1126 14:32:29.408806 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bt9l\" (UniqueName: \"kubernetes.io/projected/d819d36f-4bb5-4baf-aa77-76cf0554d458-kube-api-access-8bt9l\") pod \"d819d36f-4bb5-4baf-aa77-76cf0554d458\" (UID: \"d819d36f-4bb5-4baf-aa77-76cf0554d458\") " Nov 26 14:32:29 crc kubenswrapper[5037]: I1126 14:32:29.408890 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d819d36f-4bb5-4baf-aa77-76cf0554d458-util\") pod \"d819d36f-4bb5-4baf-aa77-76cf0554d458\" (UID: \"d819d36f-4bb5-4baf-aa77-76cf0554d458\") " Nov 26 14:32:29 crc kubenswrapper[5037]: I1126 14:32:29.408954 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d819d36f-4bb5-4baf-aa77-76cf0554d458-bundle\") pod \"d819d36f-4bb5-4baf-aa77-76cf0554d458\" (UID: \"d819d36f-4bb5-4baf-aa77-76cf0554d458\") " Nov 26 14:32:29 crc kubenswrapper[5037]: I1126 14:32:29.410222 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d819d36f-4bb5-4baf-aa77-76cf0554d458-bundle" (OuterVolumeSpecName: "bundle") pod "d819d36f-4bb5-4baf-aa77-76cf0554d458" (UID: "d819d36f-4bb5-4baf-aa77-76cf0554d458"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:32:29 crc kubenswrapper[5037]: I1126 14:32:29.419583 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d819d36f-4bb5-4baf-aa77-76cf0554d458-kube-api-access-8bt9l" (OuterVolumeSpecName: "kube-api-access-8bt9l") pod "d819d36f-4bb5-4baf-aa77-76cf0554d458" (UID: "d819d36f-4bb5-4baf-aa77-76cf0554d458"). InnerVolumeSpecName "kube-api-access-8bt9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:32:29 crc kubenswrapper[5037]: I1126 14:32:29.426010 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d819d36f-4bb5-4baf-aa77-76cf0554d458-util" (OuterVolumeSpecName: "util") pod "d819d36f-4bb5-4baf-aa77-76cf0554d458" (UID: "d819d36f-4bb5-4baf-aa77-76cf0554d458"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:32:29 crc kubenswrapper[5037]: I1126 14:32:29.511159 5037 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d819d36f-4bb5-4baf-aa77-76cf0554d458-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:32:29 crc kubenswrapper[5037]: I1126 14:32:29.511193 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bt9l\" (UniqueName: \"kubernetes.io/projected/d819d36f-4bb5-4baf-aa77-76cf0554d458-kube-api-access-8bt9l\") on node \"crc\" DevicePath \"\"" Nov 26 14:32:29 crc kubenswrapper[5037]: I1126 14:32:29.511206 5037 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d819d36f-4bb5-4baf-aa77-76cf0554d458-util\") on node \"crc\" DevicePath \"\"" Nov 26 14:32:30 crc kubenswrapper[5037]: I1126 14:32:30.028636 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" event={"ID":"d819d36f-4bb5-4baf-aa77-76cf0554d458","Type":"ContainerDied","Data":"2cd3b9213c9a295bc8a1c2bb6afb68f8799b15a47b875846f1ac9e8009028f89"} Nov 26 14:32:30 crc kubenswrapper[5037]: I1126 14:32:30.028699 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2cd3b9213c9a295bc8a1c2bb6afb68f8799b15a47b875846f1ac9e8009028f89" Nov 26 14:32:30 crc kubenswrapper[5037]: I1126 14:32:30.028809 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62" Nov 26 14:32:33 crc kubenswrapper[5037]: I1126 14:32:33.996953 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-bqpl4"] Nov 26 14:32:33 crc kubenswrapper[5037]: E1126 14:32:33.997597 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d819d36f-4bb5-4baf-aa77-76cf0554d458" containerName="util" Nov 26 14:32:33 crc kubenswrapper[5037]: I1126 14:32:33.997615 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="d819d36f-4bb5-4baf-aa77-76cf0554d458" containerName="util" Nov 26 14:32:33 crc kubenswrapper[5037]: E1126 14:32:33.997633 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d819d36f-4bb5-4baf-aa77-76cf0554d458" containerName="pull" Nov 26 14:32:33 crc kubenswrapper[5037]: I1126 14:32:33.997642 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="d819d36f-4bb5-4baf-aa77-76cf0554d458" containerName="pull" Nov 26 14:32:33 crc kubenswrapper[5037]: E1126 14:32:33.997658 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d819d36f-4bb5-4baf-aa77-76cf0554d458" containerName="extract" Nov 26 14:32:33 crc kubenswrapper[5037]: I1126 14:32:33.997666 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="d819d36f-4bb5-4baf-aa77-76cf0554d458" containerName="extract" Nov 26 14:32:33 crc kubenswrapper[5037]: I1126 14:32:33.997783 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="d819d36f-4bb5-4baf-aa77-76cf0554d458" containerName="extract" Nov 26 14:32:33 crc kubenswrapper[5037]: I1126 14:32:33.998304 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-bqpl4"
Nov 26 14:32:34 crc kubenswrapper[5037]: I1126 14:32:34.000495 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Nov 26 14:32:34 crc kubenswrapper[5037]: I1126 14:32:34.000779 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-2gb8s"
Nov 26 14:32:34 crc kubenswrapper[5037]: I1126 14:32:34.000944 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Nov 26 14:32:34 crc kubenswrapper[5037]: I1126 14:32:34.014392 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-bqpl4"]
Nov 26 14:32:34 crc kubenswrapper[5037]: I1126 14:32:34.072028 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkqv9\" (UniqueName: \"kubernetes.io/projected/abe32f75-6048-4e43-bd89-6389ac78f149-kube-api-access-wkqv9\") pod \"nmstate-operator-557fdffb88-bqpl4\" (UID: \"abe32f75-6048-4e43-bd89-6389ac78f149\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-bqpl4"
Nov 26 14:32:34 crc kubenswrapper[5037]: I1126 14:32:34.176595 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkqv9\" (UniqueName: \"kubernetes.io/projected/abe32f75-6048-4e43-bd89-6389ac78f149-kube-api-access-wkqv9\") pod \"nmstate-operator-557fdffb88-bqpl4\" (UID: \"abe32f75-6048-4e43-bd89-6389ac78f149\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-bqpl4"
Nov 26 14:32:34 crc kubenswrapper[5037]: I1126 14:32:34.195013 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkqv9\" (UniqueName: \"kubernetes.io/projected/abe32f75-6048-4e43-bd89-6389ac78f149-kube-api-access-wkqv9\") pod \"nmstate-operator-557fdffb88-bqpl4\" (UID: \"abe32f75-6048-4e43-bd89-6389ac78f149\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-bqpl4"
Nov 26 14:32:34 crc kubenswrapper[5037]: I1126 14:32:34.315565 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-bqpl4"
Nov 26 14:32:34 crc kubenswrapper[5037]: I1126 14:32:34.495554 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-bqpl4"]
Nov 26 14:32:35 crc kubenswrapper[5037]: I1126 14:32:35.063731 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-bqpl4" event={"ID":"abe32f75-6048-4e43-bd89-6389ac78f149","Type":"ContainerStarted","Data":"9c33d7bce7e0ccf5951f85e3120384fe1eb978763d6c1473cbba2b5dbadc798a"}
Nov 26 14:32:39 crc kubenswrapper[5037]: I1126 14:32:39.092474 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-bqpl4" event={"ID":"abe32f75-6048-4e43-bd89-6389ac78f149","Type":"ContainerStarted","Data":"a6b0ee45d18eaf4877db6449752fa17c559d3c536288be5d7cdb15fd27606db4"}
Nov 26 14:32:39 crc kubenswrapper[5037]: I1126 14:32:39.112717 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-bqpl4" podStartSLOduration=1.828753452 podStartE2EDuration="6.112696562s" podCreationTimestamp="2025-11-26 14:32:33 +0000 UTC" firstStartedPulling="2025-11-26 14:32:34.509204472 +0000 UTC m=+1021.305974656" lastFinishedPulling="2025-11-26 14:32:38.793147592 +0000 UTC m=+1025.589917766" observedRunningTime="2025-11-26 14:32:39.109772871 +0000 UTC m=+1025.906543065" watchObservedRunningTime="2025-11-26 14:32:39.112696562 +0000 UTC m=+1025.909466756"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.657956 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-w2xlk"]
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.658994 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-w2xlk"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.661756 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-hnvd8"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.675871 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-w2xlk"]
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.680639 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr"]
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.681518 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.683378 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.684581 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5d458\" (UniqueName: \"kubernetes.io/projected/4c1a7ee3-c2c6-496d-a366-5b3e4da21c04-kube-api-access-5d458\") pod \"nmstate-metrics-5dcf9c57c5-w2xlk\" (UID: \"4c1a7ee3-c2c6-496d-a366-5b3e4da21c04\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-w2xlk"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.697511 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr"]
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.706978 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-mhw28"]
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.707843 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.785653 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5d458\" (UniqueName: \"kubernetes.io/projected/4c1a7ee3-c2c6-496d-a366-5b3e4da21c04-kube-api-access-5d458\") pod \"nmstate-metrics-5dcf9c57c5-w2xlk\" (UID: \"4c1a7ee3-c2c6-496d-a366-5b3e4da21c04\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-w2xlk"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.785724 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rhsj\" (UniqueName: \"kubernetes.io/projected/db122af0-7421-424b-8de3-2f463d65cbdc-kube-api-access-9rhsj\") pod \"nmstate-handler-mhw28\" (UID: \"db122af0-7421-424b-8de3-2f463d65cbdc\") " pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.785763 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/2e990df6-f7f3-4d6b-9a15-ea5f85abdb66-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-s2jcr\" (UID: \"2e990df6-f7f3-4d6b-9a15-ea5f85abdb66\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.785798 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/db122af0-7421-424b-8de3-2f463d65cbdc-nmstate-lock\") pod \"nmstate-handler-mhw28\" (UID: \"db122af0-7421-424b-8de3-2f463d65cbdc\") " pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.785822 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/db122af0-7421-424b-8de3-2f463d65cbdc-dbus-socket\") pod \"nmstate-handler-mhw28\" (UID: \"db122af0-7421-424b-8de3-2f463d65cbdc\") " pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.785852 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/db122af0-7421-424b-8de3-2f463d65cbdc-ovs-socket\") pod \"nmstate-handler-mhw28\" (UID: \"db122af0-7421-424b-8de3-2f463d65cbdc\") " pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.785938 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7b4gk\" (UniqueName: \"kubernetes.io/projected/2e990df6-f7f3-4d6b-9a15-ea5f85abdb66-kube-api-access-7b4gk\") pod \"nmstate-webhook-6b89b748d8-s2jcr\" (UID: \"2e990df6-f7f3-4d6b-9a15-ea5f85abdb66\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.802423 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"]
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.803263 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.805218 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.806349 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-7frdf"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.806955 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.818580 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"]
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.833356 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5d458\" (UniqueName: \"kubernetes.io/projected/4c1a7ee3-c2c6-496d-a366-5b3e4da21c04-kube-api-access-5d458\") pod \"nmstate-metrics-5dcf9c57c5-w2xlk\" (UID: \"4c1a7ee3-c2c6-496d-a366-5b3e4da21c04\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-w2xlk"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.886664 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vd2k\" (UniqueName: \"kubernetes.io/projected/fbba51e7-c4f1-4211-a970-f299ef0a6ed9-kube-api-access-9vd2k\") pod \"nmstate-console-plugin-5874bd7bc5-74fql\" (UID: \"fbba51e7-c4f1-4211-a970-f299ef0a6ed9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.886730 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rhsj\" (UniqueName: \"kubernetes.io/projected/db122af0-7421-424b-8de3-2f463d65cbdc-kube-api-access-9rhsj\") pod \"nmstate-handler-mhw28\" (UID: \"db122af0-7421-424b-8de3-2f463d65cbdc\") " pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.886768 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/2e990df6-f7f3-4d6b-9a15-ea5f85abdb66-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-s2jcr\" (UID: \"2e990df6-f7f3-4d6b-9a15-ea5f85abdb66\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.886801 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/db122af0-7421-424b-8de3-2f463d65cbdc-nmstate-lock\") pod \"nmstate-handler-mhw28\" (UID: \"db122af0-7421-424b-8de3-2f463d65cbdc\") " pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.886826 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/db122af0-7421-424b-8de3-2f463d65cbdc-dbus-socket\") pod \"nmstate-handler-mhw28\" (UID: \"db122af0-7421-424b-8de3-2f463d65cbdc\") " pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.886844 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/db122af0-7421-424b-8de3-2f463d65cbdc-ovs-socket\") pod \"nmstate-handler-mhw28\" (UID: \"db122af0-7421-424b-8de3-2f463d65cbdc\") " pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.886864 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbba51e7-c4f1-4211-a970-f299ef0a6ed9-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-74fql\" (UID: \"fbba51e7-c4f1-4211-a970-f299ef0a6ed9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.886886 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7b4gk\" (UniqueName: \"kubernetes.io/projected/2e990df6-f7f3-4d6b-9a15-ea5f85abdb66-kube-api-access-7b4gk\") pod \"nmstate-webhook-6b89b748d8-s2jcr\" (UID: \"2e990df6-f7f3-4d6b-9a15-ea5f85abdb66\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.886918 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/fbba51e7-c4f1-4211-a970-f299ef0a6ed9-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-74fql\" (UID: \"fbba51e7-c4f1-4211-a970-f299ef0a6ed9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.887087 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/db122af0-7421-424b-8de3-2f463d65cbdc-ovs-socket\") pod \"nmstate-handler-mhw28\" (UID: \"db122af0-7421-424b-8de3-2f463d65cbdc\") " pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.887131 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/db122af0-7421-424b-8de3-2f463d65cbdc-nmstate-lock\") pod \"nmstate-handler-mhw28\" (UID: \"db122af0-7421-424b-8de3-2f463d65cbdc\") " pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.887364 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/db122af0-7421-424b-8de3-2f463d65cbdc-dbus-socket\") pod \"nmstate-handler-mhw28\" (UID: \"db122af0-7421-424b-8de3-2f463d65cbdc\") " pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.890657 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/2e990df6-f7f3-4d6b-9a15-ea5f85abdb66-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-s2jcr\" (UID: \"2e990df6-f7f3-4d6b-9a15-ea5f85abdb66\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.908351 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rhsj\" (UniqueName: \"kubernetes.io/projected/db122af0-7421-424b-8de3-2f463d65cbdc-kube-api-access-9rhsj\") pod \"nmstate-handler-mhw28\" (UID: \"db122af0-7421-424b-8de3-2f463d65cbdc\") " pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.924625 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7b4gk\" (UniqueName: \"kubernetes.io/projected/2e990df6-f7f3-4d6b-9a15-ea5f85abdb66-kube-api-access-7b4gk\") pod \"nmstate-webhook-6b89b748d8-s2jcr\" (UID: \"2e990df6-f7f3-4d6b-9a15-ea5f85abdb66\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.977404 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-w2xlk"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.987747 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbba51e7-c4f1-4211-a970-f299ef0a6ed9-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-74fql\" (UID: \"fbba51e7-c4f1-4211-a970-f299ef0a6ed9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.987802 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/fbba51e7-c4f1-4211-a970-f299ef0a6ed9-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-74fql\" (UID: \"fbba51e7-c4f1-4211-a970-f299ef0a6ed9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.987902 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vd2k\" (UniqueName: \"kubernetes.io/projected/fbba51e7-c4f1-4211-a970-f299ef0a6ed9-kube-api-access-9vd2k\") pod \"nmstate-console-plugin-5874bd7bc5-74fql\" (UID: \"fbba51e7-c4f1-4211-a970-f299ef0a6ed9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.989070 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/fbba51e7-c4f1-4211-a970-f299ef0a6ed9-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-74fql\" (UID: \"fbba51e7-c4f1-4211-a970-f299ef0a6ed9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"
Nov 26 14:32:42 crc kubenswrapper[5037]: E1126 14:32:42.989238 5037 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found
Nov 26 14:32:42 crc kubenswrapper[5037]: E1126 14:32:42.989334 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbba51e7-c4f1-4211-a970-f299ef0a6ed9-plugin-serving-cert podName:fbba51e7-c4f1-4211-a970-f299ef0a6ed9 nodeName:}" failed. No retries permitted until 2025-11-26 14:32:43.489312281 +0000 UTC m=+1030.286082525 (durationBeforeRetry 500ms).
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/fbba51e7-c4f1-4211-a970-f299ef0a6ed9-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-74fql" (UID: "fbba51e7-c4f1-4211-a970-f299ef0a6ed9") : secret "plugin-serving-cert" not found
Nov 26 14:32:42 crc kubenswrapper[5037]: I1126 14:32:42.997378 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.004440 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7dd4888dc-whk98"]
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.005161 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.006975 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vd2k\" (UniqueName: \"kubernetes.io/projected/fbba51e7-c4f1-4211-a970-f299ef0a6ed9-kube-api-access-9vd2k\") pod \"nmstate-console-plugin-5874bd7bc5-74fql\" (UID: \"fbba51e7-c4f1-4211-a970-f299ef0a6ed9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.024430 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7dd4888dc-whk98"]
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.024616 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:43 crc kubenswrapper[5037]: W1126 14:32:43.075111 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb122af0_7421_424b_8de3_2f463d65cbdc.slice/crio-92ba22399d08d883c7da153130161a194511a3f9a1daa9e6cf0592623c834d28 WatchSource:0}: Error finding container 92ba22399d08d883c7da153130161a194511a3f9a1daa9e6cf0592623c834d28: Status 404 returned error can't find the container with id 92ba22399d08d883c7da153130161a194511a3f9a1daa9e6cf0592623c834d28
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.089474 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/315f8a88-fe0a-416e-af72-938fe3bf35b5-oauth-serving-cert\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.089513 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/315f8a88-fe0a-416e-af72-938fe3bf35b5-service-ca\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.089532 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lctfj\" (UniqueName: \"kubernetes.io/projected/315f8a88-fe0a-416e-af72-938fe3bf35b5-kube-api-access-lctfj\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.089588 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/315f8a88-fe0a-416e-af72-938fe3bf35b5-console-oauth-config\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.089621 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/315f8a88-fe0a-416e-af72-938fe3bf35b5-console-serving-cert\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.089637 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/315f8a88-fe0a-416e-af72-938fe3bf35b5-trusted-ca-bundle\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.089663 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/315f8a88-fe0a-416e-af72-938fe3bf35b5-console-config\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.123024 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-mhw28" event={"ID":"db122af0-7421-424b-8de3-2f463d65cbdc","Type":"ContainerStarted","Data":"92ba22399d08d883c7da153130161a194511a3f9a1daa9e6cf0592623c834d28"}
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.190907 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/315f8a88-fe0a-416e-af72-938fe3bf35b5-console-config\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.190962 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/315f8a88-fe0a-416e-af72-938fe3bf35b5-oauth-serving-cert\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.190983 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/315f8a88-fe0a-416e-af72-938fe3bf35b5-service-ca\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.191008 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lctfj\" (UniqueName: \"kubernetes.io/projected/315f8a88-fe0a-416e-af72-938fe3bf35b5-kube-api-access-lctfj\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.191061 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/315f8a88-fe0a-416e-af72-938fe3bf35b5-console-oauth-config\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.191084 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/315f8a88-fe0a-416e-af72-938fe3bf35b5-console-serving-cert\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.191100 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/315f8a88-fe0a-416e-af72-938fe3bf35b5-trusted-ca-bundle\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.191958 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/315f8a88-fe0a-416e-af72-938fe3bf35b5-console-config\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.192036 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/315f8a88-fe0a-416e-af72-938fe3bf35b5-service-ca\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.192159 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/315f8a88-fe0a-416e-af72-938fe3bf35b5-trusted-ca-bundle\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.193067 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/315f8a88-fe0a-416e-af72-938fe3bf35b5-oauth-serving-cert\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.196113 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/315f8a88-fe0a-416e-af72-938fe3bf35b5-console-serving-cert\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.201090 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/315f8a88-fe0a-416e-af72-938fe3bf35b5-console-oauth-config\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.210671 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lctfj\" (UniqueName: \"kubernetes.io/projected/315f8a88-fe0a-416e-af72-938fe3bf35b5-kube-api-access-lctfj\") pod \"console-7dd4888dc-whk98\" (UID: \"315f8a88-fe0a-416e-af72-938fe3bf35b5\") " pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.212447 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr"]
Nov 26 14:32:43 crc kubenswrapper[5037]: W1126 14:32:43.217117 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e990df6_f7f3_4d6b_9a15_ea5f85abdb66.slice/crio-ee9c63e1a478ac2c78762285d2b35db50b0c5252fc1eb3fcf86db9cf759f82e6 WatchSource:0}: Error finding container ee9c63e1a478ac2c78762285d2b35db50b0c5252fc1eb3fcf86db9cf759f82e6: Status 404 returned error can't find the container with id ee9c63e1a478ac2c78762285d2b35db50b0c5252fc1eb3fcf86db9cf759f82e6
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.256844 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-w2xlk"]
Nov 26 14:32:43 crc kubenswrapper[5037]: W1126 14:32:43.263537 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4c1a7ee3_c2c6_496d_a366_5b3e4da21c04.slice/crio-8b294de74c4869ed2de401584e7002e9ab7d21977f970d69f87c84f2214ded70 WatchSource:0}: Error finding container 8b294de74c4869ed2de401584e7002e9ab7d21977f970d69f87c84f2214ded70: Status 404 returned error can't find the container with id 8b294de74c4869ed2de401584e7002e9ab7d21977f970d69f87c84f2214ded70
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.335259 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.496463 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbba51e7-c4f1-4211-a970-f299ef0a6ed9-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-74fql\" (UID: \"fbba51e7-c4f1-4211-a970-f299ef0a6ed9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.507477 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbba51e7-c4f1-4211-a970-f299ef0a6ed9-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-74fql\" (UID: \"fbba51e7-c4f1-4211-a970-f299ef0a6ed9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.576711 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7dd4888dc-whk98"]
Nov 26 14:32:43 crc kubenswrapper[5037]: I1126 14:32:43.716928 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"
Nov 26 14:32:44 crc kubenswrapper[5037]: I1126 14:32:44.131107 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql"]
Nov 26 14:32:44 crc kubenswrapper[5037]: I1126 14:32:44.133057 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dd4888dc-whk98" event={"ID":"315f8a88-fe0a-416e-af72-938fe3bf35b5","Type":"ContainerStarted","Data":"5f55a8cea106efc7c526281c2e323155c3c5ac12d2a233b349bddc1891926735"}
Nov 26 14:32:44 crc kubenswrapper[5037]: I1126 14:32:44.133114 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7dd4888dc-whk98" event={"ID":"315f8a88-fe0a-416e-af72-938fe3bf35b5","Type":"ContainerStarted","Data":"98b1c9810d36a80ff491f2d932a9712d04435fcd37ad74f6149a056765f4ca6f"}
Nov 26 14:32:44 crc kubenswrapper[5037]: I1126 14:32:44.134462 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr" event={"ID":"2e990df6-f7f3-4d6b-9a15-ea5f85abdb66","Type":"ContainerStarted","Data":"ee9c63e1a478ac2c78762285d2b35db50b0c5252fc1eb3fcf86db9cf759f82e6"}
Nov 26 14:32:44 crc kubenswrapper[5037]: I1126 14:32:44.135230 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-w2xlk" event={"ID":"4c1a7ee3-c2c6-496d-a366-5b3e4da21c04","Type":"ContainerStarted","Data":"8b294de74c4869ed2de401584e7002e9ab7d21977f970d69f87c84f2214ded70"}
Nov 26 14:32:44 crc kubenswrapper[5037]: I1126 14:32:44.159245 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7dd4888dc-whk98" podStartSLOduration=2.1592241420000002 podStartE2EDuration="2.159224142s" podCreationTimestamp="2025-11-26 14:32:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:32:44.154771413 +0000 UTC m=+1030.951541607" watchObservedRunningTime="2025-11-26 14:32:44.159224142 +0000 UTC m=+1030.955994326"
Nov 26 14:32:45 crc kubenswrapper[5037]: I1126 14:32:45.143481 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql" event={"ID":"fbba51e7-c4f1-4211-a970-f299ef0a6ed9","Type":"ContainerStarted","Data":"261be2ef51a2435b93cf667ae98acfb34b077a119fdc6b4cc950236706b40c60"}
Nov 26 14:32:47 crc kubenswrapper[5037]: I1126 14:32:47.158780 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr" event={"ID":"2e990df6-f7f3-4d6b-9a15-ea5f85abdb66","Type":"ContainerStarted","Data":"82367bd9f5e1cf98353f7bc078b1b56862a903b21a50c1e1bd9a82454284f0f4"}
Nov 26 14:32:47 crc kubenswrapper[5037]: I1126 14:32:47.159423 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr"
Nov 26 14:32:47 crc kubenswrapper[5037]: I1126 14:32:47.161787 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-mhw28" event={"ID":"db122af0-7421-424b-8de3-2f463d65cbdc","Type":"ContainerStarted","Data":"378cf1c11464443c2002fe63bb96f240f7b626024d9960f6ba92bee9a81f1827"}
Nov 26 14:32:47 crc kubenswrapper[5037]: I1126 14:32:47.162154 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:47 crc kubenswrapper[5037]: I1126 14:32:47.163404 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-w2xlk" event={"ID":"4c1a7ee3-c2c6-496d-a366-5b3e4da21c04","Type":"ContainerStarted","Data":"e5d2ecb3214aba308f9c55c8c745c362f1de35ba57d9d32ab8d7be337bdcd66e"}
Nov 26 14:32:47 crc kubenswrapper[5037]: I1126 14:32:47.182694 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr" podStartSLOduration=2.086094135 podStartE2EDuration="5.182674492s" podCreationTimestamp="2025-11-26 14:32:42 +0000 UTC" firstStartedPulling="2025-11-26 14:32:43.218770674 +0000 UTC m=+1030.015540858" lastFinishedPulling="2025-11-26 14:32:46.315351031 +0000 UTC m=+1033.112121215" observedRunningTime="2025-11-26 14:32:47.175834856 +0000 UTC m=+1033.972605060" watchObservedRunningTime="2025-11-26 14:32:47.182674492 +0000 UTC m=+1033.979444676"
Nov 26 14:32:47 crc kubenswrapper[5037]: I1126 14:32:47.196476 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-mhw28" podStartSLOduration=1.895153189 podStartE2EDuration="5.196453047s" podCreationTimestamp="2025-11-26 14:32:42 +0000 UTC" firstStartedPulling="2025-11-26 14:32:43.078975259 +0000 UTC m=+1029.875745443" lastFinishedPulling="2025-11-26 14:32:46.380275117 +0000 UTC m=+1033.177045301" observedRunningTime="2025-11-26 14:32:47.192725236 +0000 UTC m=+1033.989495440" watchObservedRunningTime="2025-11-26 14:32:47.196453047 +0000 UTC m=+1033.993223231"
Nov 26 14:32:49 crc kubenswrapper[5037]: I1126 14:32:49.175566 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql" event={"ID":"fbba51e7-c4f1-4211-a970-f299ef0a6ed9","Type":"ContainerStarted","Data":"76210b8ae3abcaf6d6cf96fb5a304c01ee9765e7b655e9d52c3c61bc019285ae"}
Nov 26 14:32:49 crc kubenswrapper[5037]: I1126 14:32:49.193987 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-74fql" podStartSLOduration=2.448699812 podStartE2EDuration="7.193962365s" podCreationTimestamp="2025-11-26 14:32:42 +0000 UTC" firstStartedPulling="2025-11-26 14:32:44.133088307 +0000 UTC m=+1030.929858491" lastFinishedPulling="2025-11-26 14:32:48.87835086 +0000 UTC m=+1035.675121044" observedRunningTime="2025-11-26 14:32:49.191410793 +0000 UTC m=+1035.988181007" watchObservedRunningTime="2025-11-26 14:32:49.193962365 +0000 UTC m=+1035.990732579"
Nov 26 14:32:52 crc kubenswrapper[5037]: I1126 14:32:52.206807 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-w2xlk" event={"ID":"4c1a7ee3-c2c6-496d-a366-5b3e4da21c04","Type":"ContainerStarted","Data":"ae664f4452440de87a704d6fa480a40359cd421ed1844294184e8229de9508a7"}
Nov 26 14:32:52 crc kubenswrapper[5037]: I1126 14:32:52.235136 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-w2xlk" podStartSLOduration=2.467950254 podStartE2EDuration="10.235109652s" podCreationTimestamp="2025-11-26 14:32:42 +0000 UTC" firstStartedPulling="2025-11-26 14:32:43.265652952 +0000 UTC m=+1030.062423136" lastFinishedPulling="2025-11-26 14:32:51.03281235 +0000 UTC m=+1037.829582534" observedRunningTime="2025-11-26 14:32:52.23014487 +0000 UTC m=+1039.026915054" watchObservedRunningTime="2025-11-26 14:32:52.235109652 +0000 UTC m=+1039.031879876"
Nov 26 14:32:53 crc kubenswrapper[5037]: I1126 14:32:53.058733 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-mhw28"
Nov 26 14:32:53 crc kubenswrapper[5037]: I1126 14:32:53.336030 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:53 crc kubenswrapper[5037]: I1126 14:32:53.336525 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:53 crc kubenswrapper[5037]: I1126 14:32:53.342084 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:54 crc kubenswrapper[5037]: I1126 14:32:54.229778 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7dd4888dc-whk98"
Nov 26 14:32:54 crc kubenswrapper[5037]: I1126 14:32:54.320491 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-qfdqh"]
Nov 26 14:33:03 crc kubenswrapper[5037]: I1126 14:33:03.003723 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-s2jcr"
Nov 26 14:33:11 crc kubenswrapper[5037]: I1126 14:33:11.247025 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:33:11 crc kubenswrapper[5037]: I1126 14:33:11.247773 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.421014 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"]
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.423366 5037 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.425523 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.434315 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"]
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.582121 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw\" (UID: \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.582504 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw\" (UID: \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.582707 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4548\" (UniqueName: \"kubernetes.io/projected/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-kube-api-access-r4548\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw\" (UID: \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.683563 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw\" (UID: \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.683629 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw\" (UID: \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.683696 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4548\" (UniqueName: \"kubernetes.io/projected/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-kube-api-access-r4548\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw\" (UID: \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.684164 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw\" (UID: \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.684514 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw\" (UID: \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.707351 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4548\" (UniqueName: \"kubernetes.io/projected/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-kube-api-access-r4548\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw\" (UID: \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.742348 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"
Nov 26 14:33:17 crc kubenswrapper[5037]: I1126 14:33:17.943068 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"]
Nov 26 14:33:18 crc kubenswrapper[5037]: I1126 14:33:18.403169 5037 generic.go:334] "Generic (PLEG): container finished" podID="2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4" containerID="31a4fefa71399d0979c7219b224184fe417bb34486d58fdf6b11d2a0c2766756" exitCode=0
Nov 26 14:33:18 crc kubenswrapper[5037]: I1126 14:33:18.403239 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw" event={"ID":"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4","Type":"ContainerDied","Data":"31a4fefa71399d0979c7219b224184fe417bb34486d58fdf6b11d2a0c2766756"}
Nov 26 14:33:18 crc kubenswrapper[5037]: I1126 14:33:18.403711 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw" event={"ID":"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4","Type":"ContainerStarted","Data":"3da7efd15e5a8a6d534c19b09a7888da6d58ce0f7c94171016664abd57565444"}
Nov 26 14:33:19 crc kubenswrapper[5037]: I1126 14:33:19.369150 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-qfdqh" podUID="25030986-5796-4784-accd-c465c7c2daa3" containerName="console" containerID="cri-o://a391bab194b21e82f9a65c9d50c1f816a2e9570924ca487704a028e55db59c39" gracePeriod=15
Nov 26 14:33:19 crc kubenswrapper[5037]: I1126 14:33:19.930125 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-qfdqh_25030986-5796-4784-accd-c465c7c2daa3/console/0.log"
Nov 26 14:33:19 crc kubenswrapper[5037]: I1126 14:33:19.930536 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-qfdqh"
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.120400 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqrld\" (UniqueName: \"kubernetes.io/projected/25030986-5796-4784-accd-c465c7c2daa3-kube-api-access-gqrld\") pod \"25030986-5796-4784-accd-c465c7c2daa3\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") "
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.120481 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-service-ca\") pod \"25030986-5796-4784-accd-c465c7c2daa3\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") "
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.120518 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-console-config\") pod \"25030986-5796-4784-accd-c465c7c2daa3\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") "
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.120585 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-trusted-ca-bundle\") pod \"25030986-5796-4784-accd-c465c7c2daa3\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") "
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.120618 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/25030986-5796-4784-accd-c465c7c2daa3-console-serving-cert\") pod \"25030986-5796-4784-accd-c465c7c2daa3\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") "
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.120681 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-oauth-serving-cert\") pod \"25030986-5796-4784-accd-c465c7c2daa3\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") "
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.120702 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/25030986-5796-4784-accd-c465c7c2daa3-console-oauth-config\") pod \"25030986-5796-4784-accd-c465c7c2daa3\" (UID: \"25030986-5796-4784-accd-c465c7c2daa3\") "
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.121533 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-console-config" (OuterVolumeSpecName: "console-config") pod "25030986-5796-4784-accd-c465c7c2daa3" (UID: "25030986-5796-4784-accd-c465c7c2daa3"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.121580 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "25030986-5796-4784-accd-c465c7c2daa3" (UID: "25030986-5796-4784-accd-c465c7c2daa3"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.121669 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "25030986-5796-4784-accd-c465c7c2daa3" (UID: "25030986-5796-4784-accd-c465c7c2daa3"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.122744 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-service-ca" (OuterVolumeSpecName: "service-ca") pod "25030986-5796-4784-accd-c465c7c2daa3" (UID: "25030986-5796-4784-accd-c465c7c2daa3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.127927 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25030986-5796-4784-accd-c465c7c2daa3-kube-api-access-gqrld" (OuterVolumeSpecName: "kube-api-access-gqrld") pod "25030986-5796-4784-accd-c465c7c2daa3" (UID: "25030986-5796-4784-accd-c465c7c2daa3"). InnerVolumeSpecName "kube-api-access-gqrld". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.128131 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25030986-5796-4784-accd-c465c7c2daa3-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "25030986-5796-4784-accd-c465c7c2daa3" (UID: "25030986-5796-4784-accd-c465c7c2daa3"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.129686 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25030986-5796-4784-accd-c465c7c2daa3-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "25030986-5796-4784-accd-c465c7c2daa3" (UID: "25030986-5796-4784-accd-c465c7c2daa3"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.222562 5037 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.222624 5037 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/25030986-5796-4784-accd-c465c7c2daa3-console-oauth-config\") on node \"crc\" DevicePath \"\""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.222644 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqrld\" (UniqueName: \"kubernetes.io/projected/25030986-5796-4784-accd-c465c7c2daa3-kube-api-access-gqrld\") on node \"crc\" DevicePath \"\""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.222666 5037 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-service-ca\") on node \"crc\" DevicePath \"\""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.222686 5037 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-console-config\") on node \"crc\" DevicePath \"\""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.222776 5037 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25030986-5796-4784-accd-c465c7c2daa3-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.222851 5037 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/25030986-5796-4784-accd-c465c7c2daa3-console-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.420731 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-qfdqh_25030986-5796-4784-accd-c465c7c2daa3/console/0.log"
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.421078 5037 generic.go:334] "Generic (PLEG): container finished" podID="25030986-5796-4784-accd-c465c7c2daa3" containerID="a391bab194b21e82f9a65c9d50c1f816a2e9570924ca487704a028e55db59c39" exitCode=2
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.421110 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qfdqh" event={"ID":"25030986-5796-4784-accd-c465c7c2daa3","Type":"ContainerDied","Data":"a391bab194b21e82f9a65c9d50c1f816a2e9570924ca487704a028e55db59c39"}
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.421135 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qfdqh" event={"ID":"25030986-5796-4784-accd-c465c7c2daa3","Type":"ContainerDied","Data":"b249f62189c50e0694913d4601fc3ae5be8a38e8d2e6db18ff14bb125d71cd54"}
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.421152 5037 scope.go:117] "RemoveContainer" containerID="a391bab194b21e82f9a65c9d50c1f816a2e9570924ca487704a028e55db59c39"
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.421167 5037 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-console/console-f9d7485db-qfdqh"
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.462844 5037 scope.go:117] "RemoveContainer" containerID="a391bab194b21e82f9a65c9d50c1f816a2e9570924ca487704a028e55db59c39"
Nov 26 14:33:20 crc kubenswrapper[5037]: E1126 14:33:20.463891 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a391bab194b21e82f9a65c9d50c1f816a2e9570924ca487704a028e55db59c39\": container with ID starting with a391bab194b21e82f9a65c9d50c1f816a2e9570924ca487704a028e55db59c39 not found: ID does not exist" containerID="a391bab194b21e82f9a65c9d50c1f816a2e9570924ca487704a028e55db59c39"
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.463939 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a391bab194b21e82f9a65c9d50c1f816a2e9570924ca487704a028e55db59c39"} err="failed to get container status \"a391bab194b21e82f9a65c9d50c1f816a2e9570924ca487704a028e55db59c39\": rpc error: code = NotFound desc = could not find container \"a391bab194b21e82f9a65c9d50c1f816a2e9570924ca487704a028e55db59c39\": container with ID starting with a391bab194b21e82f9a65c9d50c1f816a2e9570924ca487704a028e55db59c39 not found: ID does not exist"
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.474869 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-qfdqh"]
Nov 26 14:33:20 crc kubenswrapper[5037]: I1126 14:33:20.478325 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-qfdqh"]
Nov 26 14:33:21 crc kubenswrapper[5037]: I1126 14:33:21.919468 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25030986-5796-4784-accd-c465c7c2daa3" path="/var/lib/kubelet/pods/25030986-5796-4784-accd-c465c7c2daa3/volumes"
Nov 26 14:33:22 crc kubenswrapper[5037]: I1126 14:33:22.442403 5037 generic.go:334] "Generic (PLEG): container finished" podID="2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4" containerID="24f26bec64a75ec2e531bcc4c3334840ec197e8fda815357809cd08d9883fe39" exitCode=0
Nov 26 14:33:22 crc kubenswrapper[5037]: I1126 14:33:22.442500 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw" event={"ID":"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4","Type":"ContainerDied","Data":"24f26bec64a75ec2e531bcc4c3334840ec197e8fda815357809cd08d9883fe39"}
Nov 26 14:33:23 crc kubenswrapper[5037]: I1126 14:33:23.456729 5037 generic.go:334] "Generic (PLEG): container finished" podID="2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4" containerID="fa389a15fc40f8acc4461e695b2f2a4d9b7080163d0684b21f8286c3e384107f" exitCode=0
Nov 26 14:33:23 crc kubenswrapper[5037]: I1126 14:33:23.456782 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw" event={"ID":"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4","Type":"ContainerDied","Data":"fa389a15fc40f8acc4461e695b2f2a4d9b7080163d0684b21f8286c3e384107f"}
Nov 26 14:33:24 crc kubenswrapper[5037]: I1126 14:33:24.756355 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"
Nov 26 14:33:24 crc kubenswrapper[5037]: I1126 14:33:24.815600 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-util\") pod \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\" (UID: \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\") "
Nov 26 14:33:24 crc kubenswrapper[5037]: I1126 14:33:24.815718 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4548\" (UniqueName: \"kubernetes.io/projected/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-kube-api-access-r4548\") pod \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\" (UID: \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\") "
Nov 26 14:33:24 crc kubenswrapper[5037]: I1126 14:33:24.815814 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-bundle\") pod \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\" (UID: \"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4\") "
Nov 26 14:33:24 crc kubenswrapper[5037]: I1126 14:33:24.817252 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-bundle" (OuterVolumeSpecName: "bundle") pod "2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4" (UID: "2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:33:24 crc kubenswrapper[5037]: I1126 14:33:24.824381 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-kube-api-access-r4548" (OuterVolumeSpecName: "kube-api-access-r4548") pod "2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4" (UID: "2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4"). InnerVolumeSpecName "kube-api-access-r4548". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:33:24 crc kubenswrapper[5037]: I1126 14:33:24.827105 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-util" (OuterVolumeSpecName: "util") pod "2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4" (UID: "2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:33:24 crc kubenswrapper[5037]: I1126 14:33:24.917854 5037 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 14:33:24 crc kubenswrapper[5037]: I1126 14:33:24.917903 5037 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-util\") on node \"crc\" DevicePath \"\""
Nov 26 14:33:24 crc kubenswrapper[5037]: I1126 14:33:24.917913 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4548\" (UniqueName: \"kubernetes.io/projected/2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4-kube-api-access-r4548\") on node \"crc\" DevicePath \"\""
Nov 26 14:33:25 crc kubenswrapper[5037]: I1126 14:33:25.472832 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw" event={"ID":"2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4","Type":"ContainerDied","Data":"3da7efd15e5a8a6d534c19b09a7888da6d58ce0f7c94171016664abd57565444"}
Nov 26 14:33:25 crc kubenswrapper[5037]: I1126 14:33:25.472883 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3da7efd15e5a8a6d534c19b09a7888da6d58ce0f7c94171016664abd57565444"
Nov 26 14:33:25 crc kubenswrapper[5037]: I1126 14:33:25.472966 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.348707 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r"]
Nov 26 14:33:39 crc kubenswrapper[5037]: E1126 14:33:39.349626 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25030986-5796-4784-accd-c465c7c2daa3" containerName="console"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.349642 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="25030986-5796-4784-accd-c465c7c2daa3" containerName="console"
Nov 26 14:33:39 crc kubenswrapper[5037]: E1126 14:33:39.349658 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4" containerName="util"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.349666 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4" containerName="util"
Nov 26 14:33:39 crc kubenswrapper[5037]: E1126 14:33:39.349681 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4" containerName="extract"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.349689 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4" containerName="extract"
Nov 26 14:33:39 crc kubenswrapper[5037]: E1126 14:33:39.349707 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4" containerName="pull"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.349714 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4" containerName="pull"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.349822 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4" containerName="extract"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.349847 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="25030986-5796-4784-accd-c465c7c2daa3" containerName="console"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.350378 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.354868 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.354912 5037 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.355418 5037 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.355438 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.355556 5037 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-fpxcv"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.368602 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r"]
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.416059 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b98b1801-9296-470e-b171-7923029c8747-apiservice-cert\") pod \"metallb-operator-controller-manager-67bfdbb67f-dk98r\" (UID: \"b98b1801-9296-470e-b171-7923029c8747\") " pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.416113 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b98b1801-9296-470e-b171-7923029c8747-webhook-cert\") pod \"metallb-operator-controller-manager-67bfdbb67f-dk98r\" (UID: \"b98b1801-9296-470e-b171-7923029c8747\") " pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.416150 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkdl4\" (UniqueName: \"kubernetes.io/projected/b98b1801-9296-470e-b171-7923029c8747-kube-api-access-rkdl4\") pod \"metallb-operator-controller-manager-67bfdbb67f-dk98r\" (UID: \"b98b1801-9296-470e-b171-7923029c8747\") " pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.516918 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b98b1801-9296-470e-b171-7923029c8747-apiservice-cert\") pod \"metallb-operator-controller-manager-67bfdbb67f-dk98r\" (UID: \"b98b1801-9296-470e-b171-7923029c8747\") " pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r"
Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.517301 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName:
\"kubernetes.io/secret/b98b1801-9296-470e-b171-7923029c8747-webhook-cert\") pod \"metallb-operator-controller-manager-67bfdbb67f-dk98r\" (UID: \"b98b1801-9296-470e-b171-7923029c8747\") " pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.517332 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkdl4\" (UniqueName: \"kubernetes.io/projected/b98b1801-9296-470e-b171-7923029c8747-kube-api-access-rkdl4\") pod \"metallb-operator-controller-manager-67bfdbb67f-dk98r\" (UID: \"b98b1801-9296-470e-b171-7923029c8747\") " pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.523533 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b98b1801-9296-470e-b171-7923029c8747-webhook-cert\") pod \"metallb-operator-controller-manager-67bfdbb67f-dk98r\" (UID: \"b98b1801-9296-470e-b171-7923029c8747\") " pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.532975 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b98b1801-9296-470e-b171-7923029c8747-apiservice-cert\") pod \"metallb-operator-controller-manager-67bfdbb67f-dk98r\" (UID: \"b98b1801-9296-470e-b171-7923029c8747\") " pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.541045 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkdl4\" (UniqueName: \"kubernetes.io/projected/b98b1801-9296-470e-b171-7923029c8747-kube-api-access-rkdl4\") pod \"metallb-operator-controller-manager-67bfdbb67f-dk98r\" (UID: \"b98b1801-9296-470e-b171-7923029c8747\") " pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.667249 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.677368 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf"] Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.678255 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.684179 5037 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.684675 5037 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-zbzh6" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.684840 5037 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.697622 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf"] Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.721948 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-df76c\" (UniqueName: \"kubernetes.io/projected/75840ff3-188c-49b7-8dc4-0f52d981a5d8-kube-api-access-df76c\") pod \"metallb-operator-webhook-server-f4db46f7f-5dxzf\" (UID: \"75840ff3-188c-49b7-8dc4-0f52d981a5d8\") " pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.722008 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/75840ff3-188c-49b7-8dc4-0f52d981a5d8-webhook-cert\") pod \"metallb-operator-webhook-server-f4db46f7f-5dxzf\" (UID: \"75840ff3-188c-49b7-8dc4-0f52d981a5d8\") " pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.722063 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/75840ff3-188c-49b7-8dc4-0f52d981a5d8-apiservice-cert\") pod \"metallb-operator-webhook-server-f4db46f7f-5dxzf\" (UID: \"75840ff3-188c-49b7-8dc4-0f52d981a5d8\") " pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.824080 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/75840ff3-188c-49b7-8dc4-0f52d981a5d8-apiservice-cert\") pod \"metallb-operator-webhook-server-f4db46f7f-5dxzf\" (UID: \"75840ff3-188c-49b7-8dc4-0f52d981a5d8\") " pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.824173 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-df76c\" (UniqueName: \"kubernetes.io/projected/75840ff3-188c-49b7-8dc4-0f52d981a5d8-kube-api-access-df76c\") pod \"metallb-operator-webhook-server-f4db46f7f-5dxzf\" (UID: \"75840ff3-188c-49b7-8dc4-0f52d981a5d8\") " pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.824200 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/75840ff3-188c-49b7-8dc4-0f52d981a5d8-webhook-cert\") pod \"metallb-operator-webhook-server-f4db46f7f-5dxzf\" (UID: \"75840ff3-188c-49b7-8dc4-0f52d981a5d8\") " pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.828580 5037 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/75840ff3-188c-49b7-8dc4-0f52d981a5d8-apiservice-cert\") pod \"metallb-operator-webhook-server-f4db46f7f-5dxzf\" (UID: \"75840ff3-188c-49b7-8dc4-0f52d981a5d8\") " pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.837111 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/75840ff3-188c-49b7-8dc4-0f52d981a5d8-webhook-cert\") pod \"metallb-operator-webhook-server-f4db46f7f-5dxzf\" (UID: \"75840ff3-188c-49b7-8dc4-0f52d981a5d8\") " pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" Nov 26 14:33:39 crc kubenswrapper[5037]: I1126 14:33:39.850299 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-df76c\" (UniqueName: \"kubernetes.io/projected/75840ff3-188c-49b7-8dc4-0f52d981a5d8-kube-api-access-df76c\") pod \"metallb-operator-webhook-server-f4db46f7f-5dxzf\" (UID: \"75840ff3-188c-49b7-8dc4-0f52d981a5d8\") " pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" Nov 26 14:33:40 crc kubenswrapper[5037]: I1126 14:33:40.046794 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" Nov 26 14:33:40 crc kubenswrapper[5037]: I1126 14:33:40.077624 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r"] Nov 26 14:33:40 crc kubenswrapper[5037]: W1126 14:33:40.097134 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb98b1801_9296_470e_b171_7923029c8747.slice/crio-cfaf136d1c9a34b978de4b715c32f675501a7aeda8302f433dfb1bfc0e177cab WatchSource:0}: Error finding container cfaf136d1c9a34b978de4b715c32f675501a7aeda8302f433dfb1bfc0e177cab: Status 404 returned error can't find the container with id cfaf136d1c9a34b978de4b715c32f675501a7aeda8302f433dfb1bfc0e177cab Nov 26 14:33:40 crc kubenswrapper[5037]: I1126 14:33:40.276174 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf"] Nov 26 14:33:40 crc kubenswrapper[5037]: W1126 14:33:40.280957 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod75840ff3_188c_49b7_8dc4_0f52d981a5d8.slice/crio-f4996918f68fddc89e35aeb354f67280e837fc203afaf0919638de28fe101283 WatchSource:0}: Error finding container f4996918f68fddc89e35aeb354f67280e837fc203afaf0919638de28fe101283: Status 404 returned error can't find the container with id f4996918f68fddc89e35aeb354f67280e837fc203afaf0919638de28fe101283 Nov 26 14:33:40 crc kubenswrapper[5037]: I1126 14:33:40.569148 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" event={"ID":"75840ff3-188c-49b7-8dc4-0f52d981a5d8","Type":"ContainerStarted","Data":"f4996918f68fddc89e35aeb354f67280e837fc203afaf0919638de28fe101283"} Nov 26 14:33:40 crc kubenswrapper[5037]: I1126 14:33:40.570439 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r" 
event={"ID":"b98b1801-9296-470e-b171-7923029c8747","Type":"ContainerStarted","Data":"cfaf136d1c9a34b978de4b715c32f675501a7aeda8302f433dfb1bfc0e177cab"} Nov 26 14:33:41 crc kubenswrapper[5037]: I1126 14:33:41.246895 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:33:41 crc kubenswrapper[5037]: I1126 14:33:41.247188 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:33:45 crc kubenswrapper[5037]: I1126 14:33:45.604130 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r" event={"ID":"b98b1801-9296-470e-b171-7923029c8747","Type":"ContainerStarted","Data":"19aa0f2d2b850089f7e767ddf1681233a1c8810e6b6297619a53ea0eb0b98af8"} Nov 26 14:33:45 crc kubenswrapper[5037]: I1126 14:33:45.604784 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r" Nov 26 14:33:45 crc kubenswrapper[5037]: I1126 14:33:45.641611 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r" podStartSLOduration=1.375557917 podStartE2EDuration="6.641593882s" podCreationTimestamp="2025-11-26 14:33:39 +0000 UTC" firstStartedPulling="2025-11-26 14:33:40.110216616 +0000 UTC m=+1086.906986800" lastFinishedPulling="2025-11-26 14:33:45.376252581 +0000 UTC m=+1092.173022765" observedRunningTime="2025-11-26 14:33:45.6386402 +0000 UTC m=+1092.435410384" watchObservedRunningTime="2025-11-26 14:33:45.641593882 +0000 UTC m=+1092.438364086" Nov 26 14:33:46 crc kubenswrapper[5037]: I1126 14:33:46.613494 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" event={"ID":"75840ff3-188c-49b7-8dc4-0f52d981a5d8","Type":"ContainerStarted","Data":"4ffd4f978cf663a575e1fb9e1ab9fed9e9c987b52480cfee19596d9f5476a0c1"} Nov 26 14:33:46 crc kubenswrapper[5037]: I1126 14:33:46.648339 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" podStartSLOduration=2.223763122 podStartE2EDuration="7.648318413s" podCreationTimestamp="2025-11-26 14:33:39 +0000 UTC" firstStartedPulling="2025-11-26 14:33:40.283902452 +0000 UTC m=+1087.080672636" lastFinishedPulling="2025-11-26 14:33:45.708457743 +0000 UTC m=+1092.505227927" observedRunningTime="2025-11-26 14:33:46.642970532 +0000 UTC m=+1093.439740716" watchObservedRunningTime="2025-11-26 14:33:46.648318413 +0000 UTC m=+1093.445088597" Nov 26 14:33:47 crc kubenswrapper[5037]: I1126 14:33:47.620026 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" Nov 26 14:34:00 crc kubenswrapper[5037]: I1126 14:34:00.051563 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-f4db46f7f-5dxzf" Nov 26 14:34:11 crc kubenswrapper[5037]: I1126 
14:34:11.247159 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:34:11 crc kubenswrapper[5037]: I1126 14:34:11.247718 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:34:11 crc kubenswrapper[5037]: I1126 14:34:11.247763 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:34:11 crc kubenswrapper[5037]: I1126 14:34:11.248382 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"36a7c42fc7524fe0b2d1a2075eae30f52f037f2969e9db7800448ccd49cfcc57"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 14:34:11 crc kubenswrapper[5037]: I1126 14:34:11.248428 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://36a7c42fc7524fe0b2d1a2075eae30f52f037f2969e9db7800448ccd49cfcc57" gracePeriod=600 Nov 26 14:34:11 crc kubenswrapper[5037]: I1126 14:34:11.773785 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="36a7c42fc7524fe0b2d1a2075eae30f52f037f2969e9db7800448ccd49cfcc57" exitCode=0 Nov 26 14:34:11 crc kubenswrapper[5037]: I1126 14:34:11.773867 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"36a7c42fc7524fe0b2d1a2075eae30f52f037f2969e9db7800448ccd49cfcc57"} Nov 26 14:34:11 crc kubenswrapper[5037]: I1126 14:34:11.774216 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"302cbe16bdb6c8873822bf0697d168f893d8457e80a7e1227846608f32db69c8"} Nov 26 14:34:11 crc kubenswrapper[5037]: I1126 14:34:11.774246 5037 scope.go:117] "RemoveContainer" containerID="ff44b46a3dc466f256d3b6fac132034130b6623577a5e3d570d1982ec2c3ae66" Nov 26 14:34:19 crc kubenswrapper[5037]: I1126 14:34:19.669696 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-67bfdbb67f-dk98r" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.479265 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-lxggf"] Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.480062 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.485194 5037 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.485593 5037 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-9qj5r" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.487159 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-qp4qk"] Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.490389 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.490742 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-lxggf"] Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.493271 5037 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.493404 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.578266 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-mcjkn"] Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.579408 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-mcjkn" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.581962 5037 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.581983 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.582077 5037 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-2gdfm" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.582169 5037 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.602839 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-whwvq"] Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.603841 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-whwvq" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.606174 5037 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.622636 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-whwvq"] Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.673905 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/a8dac2bd-68ba-4cab-8119-051f1d14219f-frr-sockets\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.673951 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/a8dac2bd-68ba-4cab-8119-051f1d14219f-metrics\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.673971 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/a8dac2bd-68ba-4cab-8119-051f1d14219f-reloader\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.673988 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/933638aa-7d2b-4e93-b969-42484711d78c-cert\") pod \"frr-k8s-webhook-server-6998585d5-lxggf\" (UID: \"933638aa-7d2b-4e93-b969-42484711d78c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.674010 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/a8dac2bd-68ba-4cab-8119-051f1d14219f-frr-conf\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.674079 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a8dac2bd-68ba-4cab-8119-051f1d14219f-metrics-certs\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.674120 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/a8dac2bd-68ba-4cab-8119-051f1d14219f-frr-startup\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.674139 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvhfm\" (UniqueName: \"kubernetes.io/projected/933638aa-7d2b-4e93-b969-42484711d78c-kube-api-access-bvhfm\") pod \"frr-k8s-webhook-server-6998585d5-lxggf\" (UID: \"933638aa-7d2b-4e93-b969-42484711d78c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf" Nov 26 14:34:20 crc 
kubenswrapper[5037]: I1126 14:34:20.674160 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n5hd\" (UniqueName: \"kubernetes.io/projected/a8dac2bd-68ba-4cab-8119-051f1d14219f-kube-api-access-2n5hd\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775321 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a8dac2bd-68ba-4cab-8119-051f1d14219f-metrics-certs\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775380 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvhfm\" (UniqueName: \"kubernetes.io/projected/933638aa-7d2b-4e93-b969-42484711d78c-kube-api-access-bvhfm\") pod \"frr-k8s-webhook-server-6998585d5-lxggf\" (UID: \"933638aa-7d2b-4e93-b969-42484711d78c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775403 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/a8dac2bd-68ba-4cab-8119-051f1d14219f-frr-startup\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775421 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2n5hd\" (UniqueName: \"kubernetes.io/projected/a8dac2bd-68ba-4cab-8119-051f1d14219f-kube-api-access-2n5hd\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775462 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xv8w4\" (UniqueName: \"kubernetes.io/projected/568c6e76-bd77-4cdb-a947-faf6537e5a41-kube-api-access-xv8w4\") pod \"controller-6c7b4b5f48-whwvq\" (UID: \"568c6e76-bd77-4cdb-a947-faf6537e5a41\") " pod="metallb-system/controller-6c7b4b5f48-whwvq" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775490 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-metrics-certs\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775543 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-memberlist\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775581 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/568c6e76-bd77-4cdb-a947-faf6537e5a41-cert\") pod \"controller-6c7b4b5f48-whwvq\" (UID: \"568c6e76-bd77-4cdb-a947-faf6537e5a41\") " pod="metallb-system/controller-6c7b4b5f48-whwvq" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775606 5037 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjgss\" (UniqueName: \"kubernetes.io/projected/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-kube-api-access-kjgss\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775620 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/568c6e76-bd77-4cdb-a947-faf6537e5a41-metrics-certs\") pod \"controller-6c7b4b5f48-whwvq\" (UID: \"568c6e76-bd77-4cdb-a947-faf6537e5a41\") " pod="metallb-system/controller-6c7b4b5f48-whwvq" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775651 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/a8dac2bd-68ba-4cab-8119-051f1d14219f-frr-sockets\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775677 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/a8dac2bd-68ba-4cab-8119-051f1d14219f-metrics\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775697 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/a8dac2bd-68ba-4cab-8119-051f1d14219f-reloader\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775717 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/933638aa-7d2b-4e93-b969-42484711d78c-cert\") pod \"frr-k8s-webhook-server-6998585d5-lxggf\" (UID: \"933638aa-7d2b-4e93-b969-42484711d78c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775734 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-metallb-excludel2\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.775759 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/a8dac2bd-68ba-4cab-8119-051f1d14219f-frr-conf\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: E1126 14:34:20.775861 5037 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Nov 26 14:34:20 crc kubenswrapper[5037]: E1126 14:34:20.775945 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/933638aa-7d2b-4e93-b969-42484711d78c-cert podName:933638aa-7d2b-4e93-b969-42484711d78c nodeName:}" failed. No retries permitted until 2025-11-26 14:34:21.275929472 +0000 UTC m=+1128.072699656 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/933638aa-7d2b-4e93-b969-42484711d78c-cert") pod "frr-k8s-webhook-server-6998585d5-lxggf" (UID: "933638aa-7d2b-4e93-b969-42484711d78c") : secret "frr-k8s-webhook-server-cert" not found Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.776364 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/a8dac2bd-68ba-4cab-8119-051f1d14219f-frr-sockets\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.776828 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/a8dac2bd-68ba-4cab-8119-051f1d14219f-reloader\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.776854 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/a8dac2bd-68ba-4cab-8119-051f1d14219f-frr-startup\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.776929 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/a8dac2bd-68ba-4cab-8119-051f1d14219f-metrics\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.777014 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/a8dac2bd-68ba-4cab-8119-051f1d14219f-frr-conf\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.784149 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a8dac2bd-68ba-4cab-8119-051f1d14219f-metrics-certs\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.794704 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvhfm\" (UniqueName: \"kubernetes.io/projected/933638aa-7d2b-4e93-b969-42484711d78c-kube-api-access-bvhfm\") pod \"frr-k8s-webhook-server-6998585d5-lxggf\" (UID: \"933638aa-7d2b-4e93-b969-42484711d78c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.808919 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n5hd\" (UniqueName: \"kubernetes.io/projected/a8dac2bd-68ba-4cab-8119-051f1d14219f-kube-api-access-2n5hd\") pod \"frr-k8s-qp4qk\" (UID: \"a8dac2bd-68ba-4cab-8119-051f1d14219f\") " pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.813737 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-qp4qk" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.876633 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-metallb-excludel2\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.876743 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xv8w4\" (UniqueName: \"kubernetes.io/projected/568c6e76-bd77-4cdb-a947-faf6537e5a41-kube-api-access-xv8w4\") pod \"controller-6c7b4b5f48-whwvq\" (UID: \"568c6e76-bd77-4cdb-a947-faf6537e5a41\") " pod="metallb-system/controller-6c7b4b5f48-whwvq" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.876773 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-metrics-certs\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.876797 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-memberlist\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.876845 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/568c6e76-bd77-4cdb-a947-faf6537e5a41-cert\") pod \"controller-6c7b4b5f48-whwvq\" (UID: \"568c6e76-bd77-4cdb-a947-faf6537e5a41\") " pod="metallb-system/controller-6c7b4b5f48-whwvq" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.876891 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/568c6e76-bd77-4cdb-a947-faf6537e5a41-metrics-certs\") pod \"controller-6c7b4b5f48-whwvq\" (UID: \"568c6e76-bd77-4cdb-a947-faf6537e5a41\") " pod="metallb-system/controller-6c7b4b5f48-whwvq" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.876925 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjgss\" (UniqueName: \"kubernetes.io/projected/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-kube-api-access-kjgss\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn" Nov 26 14:34:20 crc kubenswrapper[5037]: E1126 14:34:20.877109 5037 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 26 14:34:20 crc kubenswrapper[5037]: E1126 14:34:20.877224 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-memberlist podName:bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45 nodeName:}" failed. No retries permitted until 2025-11-26 14:34:21.377192932 +0000 UTC m=+1128.173963116 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-memberlist") pod "speaker-mcjkn" (UID: "bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45") : secret "metallb-memberlist" not found Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.878233 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-metallb-excludel2\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.880506 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/568c6e76-bd77-4cdb-a947-faf6537e5a41-cert\") pod \"controller-6c7b4b5f48-whwvq\" (UID: \"568c6e76-bd77-4cdb-a947-faf6537e5a41\") " pod="metallb-system/controller-6c7b4b5f48-whwvq" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.881560 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/568c6e76-bd77-4cdb-a947-faf6537e5a41-metrics-certs\") pod \"controller-6c7b4b5f48-whwvq\" (UID: \"568c6e76-bd77-4cdb-a947-faf6537e5a41\") " pod="metallb-system/controller-6c7b4b5f48-whwvq" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.885882 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-metrics-certs\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.894926 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xv8w4\" (UniqueName: \"kubernetes.io/projected/568c6e76-bd77-4cdb-a947-faf6537e5a41-kube-api-access-xv8w4\") pod \"controller-6c7b4b5f48-whwvq\" (UID: \"568c6e76-bd77-4cdb-a947-faf6537e5a41\") " pod="metallb-system/controller-6c7b4b5f48-whwvq" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.896757 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjgss\" (UniqueName: \"kubernetes.io/projected/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-kube-api-access-kjgss\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn" Nov 26 14:34:20 crc kubenswrapper[5037]: I1126 14:34:20.917701 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-whwvq" Nov 26 14:34:21 crc kubenswrapper[5037]: I1126 14:34:21.145223 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-whwvq"] Nov 26 14:34:21 crc kubenswrapper[5037]: I1126 14:34:21.282935 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/933638aa-7d2b-4e93-b969-42484711d78c-cert\") pod \"frr-k8s-webhook-server-6998585d5-lxggf\" (UID: \"933638aa-7d2b-4e93-b969-42484711d78c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf" Nov 26 14:34:21 crc kubenswrapper[5037]: I1126 14:34:21.289397 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/933638aa-7d2b-4e93-b969-42484711d78c-cert\") pod \"frr-k8s-webhook-server-6998585d5-lxggf\" (UID: \"933638aa-7d2b-4e93-b969-42484711d78c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf" Nov 26 14:34:21 crc kubenswrapper[5037]: I1126 14:34:21.385768 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-memberlist\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn" Nov 26 14:34:21 crc kubenswrapper[5037]: E1126 14:34:21.385942 5037 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 26 14:34:21 crc kubenswrapper[5037]: E1126 14:34:21.386039 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-memberlist podName:bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45 nodeName:}" failed. No retries permitted until 2025-11-26 14:34:22.38602058 +0000 UTC m=+1129.182790764 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-memberlist") pod "speaker-mcjkn" (UID: "bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45") : secret "metallb-memberlist" not found Nov 26 14:34:21 crc kubenswrapper[5037]: I1126 14:34:21.404936 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf" Nov 26 14:34:21 crc kubenswrapper[5037]: I1126 14:34:21.669086 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-lxggf"] Nov 26 14:34:21 crc kubenswrapper[5037]: W1126 14:34:21.677892 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod933638aa_7d2b_4e93_b969_42484711d78c.slice/crio-74077fc8c8d177bfc9eb33073692ea6f5d41c3f2fc06a4cd76e4dd5fdef7218a WatchSource:0}: Error finding container 74077fc8c8d177bfc9eb33073692ea6f5d41c3f2fc06a4cd76e4dd5fdef7218a: Status 404 returned error can't find the container with id 74077fc8c8d177bfc9eb33073692ea6f5d41c3f2fc06a4cd76e4dd5fdef7218a Nov 26 14:34:21 crc kubenswrapper[5037]: I1126 14:34:21.897027 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-whwvq" event={"ID":"568c6e76-bd77-4cdb-a947-faf6537e5a41","Type":"ContainerStarted","Data":"ea1d757454636e90d06c1a8e0dcd502360dfba54ddab2c30bec76f56c4bb3ebe"} Nov 26 14:34:21 crc kubenswrapper[5037]: I1126 14:34:21.897089 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-whwvq" event={"ID":"568c6e76-bd77-4cdb-a947-faf6537e5a41","Type":"ContainerStarted","Data":"853a7ca9c50ff94e14b31d7cc93ba7c4ab2ba7aea2b18a5cbc96d58a93d9891a"} Nov 26 14:34:21 crc kubenswrapper[5037]: I1126 14:34:21.897105 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-whwvq" event={"ID":"568c6e76-bd77-4cdb-a947-faf6537e5a41","Type":"ContainerStarted","Data":"665cbff47ce02d1d888f39cfb2a03016078a2a7e26b690775e97708b221da868"} Nov 26 14:34:21 crc kubenswrapper[5037]: I1126 14:34:21.897265 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-whwvq" Nov 26 14:34:21 crc kubenswrapper[5037]: I1126 14:34:21.898747 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qp4qk" event={"ID":"a8dac2bd-68ba-4cab-8119-051f1d14219f","Type":"ContainerStarted","Data":"0e0840368b35434d3c7f79fa266e5cbb80980fbd0dd7aaf54eb40825e757d565"} Nov 26 14:34:21 crc kubenswrapper[5037]: I1126 14:34:21.900191 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf" event={"ID":"933638aa-7d2b-4e93-b969-42484711d78c","Type":"ContainerStarted","Data":"74077fc8c8d177bfc9eb33073692ea6f5d41c3f2fc06a4cd76e4dd5fdef7218a"} Nov 26 14:34:21 crc kubenswrapper[5037]: I1126 14:34:21.913398 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-whwvq" podStartSLOduration=1.913375882 podStartE2EDuration="1.913375882s" podCreationTimestamp="2025-11-26 14:34:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:34:21.912054479 +0000 UTC m=+1128.708824673" watchObservedRunningTime="2025-11-26 14:34:21.913375882 +0000 UTC m=+1128.710146066" Nov 26 14:34:22 crc kubenswrapper[5037]: I1126 14:34:22.397123 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-memberlist\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn" Nov 26 14:34:22 crc kubenswrapper[5037]: I1126 
14:34:22.409884 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45-memberlist\") pod \"speaker-mcjkn\" (UID: \"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45\") " pod="metallb-system/speaker-mcjkn"
Nov 26 14:34:22 crc kubenswrapper[5037]: I1126 14:34:22.694095 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-mcjkn"
Nov 26 14:34:22 crc kubenswrapper[5037]: W1126 14:34:22.727200 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd6f7b35_4e97_4a38_ba5e_e4281e0d4b45.slice/crio-e5382439e74e9e399f97a8f5bef2f589571fb4bcce61f27d39a5d25d082a1649 WatchSource:0}: Error finding container e5382439e74e9e399f97a8f5bef2f589571fb4bcce61f27d39a5d25d082a1649: Status 404 returned error can't find the container with id e5382439e74e9e399f97a8f5bef2f589571fb4bcce61f27d39a5d25d082a1649
Nov 26 14:34:22 crc kubenswrapper[5037]: I1126 14:34:22.918182 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-mcjkn" event={"ID":"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45","Type":"ContainerStarted","Data":"e5382439e74e9e399f97a8f5bef2f589571fb4bcce61f27d39a5d25d082a1649"}
Nov 26 14:34:23 crc kubenswrapper[5037]: I1126 14:34:23.946192 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-mcjkn" event={"ID":"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45","Type":"ContainerStarted","Data":"335f1604d6a8e52e3a11f1c041636b87f00012eeee03ade3d6254e5531e8633c"}
Nov 26 14:34:23 crc kubenswrapper[5037]: I1126 14:34:23.946596 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-mcjkn" event={"ID":"bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45","Type":"ContainerStarted","Data":"d40570d7d8a9ba5f813353458f4928052106423632840b4cd0dd4fccdfb8106f"}
Nov 26 14:34:23 crc kubenswrapper[5037]: I1126 14:34:23.947110 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-mcjkn"
Nov 26 14:34:23 crc kubenswrapper[5037]: I1126 14:34:23.971252 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-mcjkn" podStartSLOduration=3.971230258 podStartE2EDuration="3.971230258s" podCreationTimestamp="2025-11-26 14:34:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:34:23.971019493 +0000 UTC m=+1130.767789667" watchObservedRunningTime="2025-11-26 14:34:23.971230258 +0000 UTC m=+1130.768000452"
Nov 26 14:34:30 crc kubenswrapper[5037]: I1126 14:34:30.019808 5037 generic.go:334] "Generic (PLEG): container finished" podID="a8dac2bd-68ba-4cab-8119-051f1d14219f" containerID="0ff1fc74c95dfb6f5d28c3c7947fef65d1247177cd615c5153e81d2bdb01e7c4" exitCode=0
Nov 26 14:34:30 crc kubenswrapper[5037]: I1126 14:34:30.021219 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qp4qk" event={"ID":"a8dac2bd-68ba-4cab-8119-051f1d14219f","Type":"ContainerDied","Data":"0ff1fc74c95dfb6f5d28c3c7947fef65d1247177cd615c5153e81d2bdb01e7c4"}
Nov 26 14:34:30 crc kubenswrapper[5037]: I1126 14:34:30.025502 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf" event={"ID":"933638aa-7d2b-4e93-b969-42484711d78c","Type":"ContainerStarted","Data":"12d3e9db904b135714fac5130987d1442fcee18e4a8e004f5aa5d1e269fa1d7c"}
Nov 26 14:34:30 crc kubenswrapper[5037]: I1126 14:34:30.026722 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf"
Nov 26 14:34:30 crc kubenswrapper[5037]: I1126 14:34:30.096734 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf" podStartSLOduration=2.8660705650000002 podStartE2EDuration="10.096716413s" podCreationTimestamp="2025-11-26 14:34:20 +0000 UTC" firstStartedPulling="2025-11-26 14:34:21.680571284 +0000 UTC m=+1128.477341478" lastFinishedPulling="2025-11-26 14:34:28.911217142 +0000 UTC m=+1135.707987326" observedRunningTime="2025-11-26 14:34:30.08839458 +0000 UTC m=+1136.885164774" watchObservedRunningTime="2025-11-26 14:34:30.096716413 +0000 UTC m=+1136.893486597"
Nov 26 14:34:31 crc kubenswrapper[5037]: I1126 14:34:31.035341 5037 generic.go:334] "Generic (PLEG): container finished" podID="a8dac2bd-68ba-4cab-8119-051f1d14219f" containerID="5291ad33d3a6c0b582931497a1142b9d6ff26693dc88b528b333d33ad251f401" exitCode=0
Nov 26 14:34:31 crc kubenswrapper[5037]: I1126 14:34:31.035435 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qp4qk" event={"ID":"a8dac2bd-68ba-4cab-8119-051f1d14219f","Type":"ContainerDied","Data":"5291ad33d3a6c0b582931497a1142b9d6ff26693dc88b528b333d33ad251f401"}
Nov 26 14:34:32 crc kubenswrapper[5037]: I1126 14:34:32.042801 5037 generic.go:334] "Generic (PLEG): container finished" podID="a8dac2bd-68ba-4cab-8119-051f1d14219f" containerID="e989c32e76a0532388bef95da3227596280c395135d7971e51fed965493acd4e" exitCode=0
Nov 26 14:34:32 crc kubenswrapper[5037]: I1126 14:34:32.043509 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qp4qk" event={"ID":"a8dac2bd-68ba-4cab-8119-051f1d14219f","Type":"ContainerDied","Data":"e989c32e76a0532388bef95da3227596280c395135d7971e51fed965493acd4e"}
Nov 26 14:34:33 crc kubenswrapper[5037]: I1126 14:34:33.062555 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qp4qk" event={"ID":"a8dac2bd-68ba-4cab-8119-051f1d14219f","Type":"ContainerStarted","Data":"5580afd3ca0dcb63e08bb75604e866735ba9f43869554ac2001aed6ee7c3102c"}
Nov 26 14:34:33 crc kubenswrapper[5037]: I1126 14:34:33.062892 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qp4qk" event={"ID":"a8dac2bd-68ba-4cab-8119-051f1d14219f","Type":"ContainerStarted","Data":"e3862314c60706ee0bb057caea830af28ba19a4b45c8cb844a099f12ea01a5c7"}
Nov 26 14:34:33 crc kubenswrapper[5037]: I1126 14:34:33.062910 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qp4qk" event={"ID":"a8dac2bd-68ba-4cab-8119-051f1d14219f","Type":"ContainerStarted","Data":"ca96b3648134bece7247e6e11b070a680e8b9e6a1ae47c0705dc7d6c789a8d52"}
Nov 26 14:34:33 crc kubenswrapper[5037]: I1126 14:34:33.062930 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qp4qk" event={"ID":"a8dac2bd-68ba-4cab-8119-051f1d14219f","Type":"ContainerStarted","Data":"a4238ebda996198581faf8d6b84e74e2cdd0e7796db4b97aa27bdf77ba7dedc9"}
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.071368 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qp4qk" event={"ID":"a8dac2bd-68ba-4cab-8119-051f1d14219f","Type":"ContainerStarted","Data":"979d1f26cffc29b7cb7fa89d1bf4f51e0fd25c06c9c213b3e8cf32587d02530d"}
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.071679 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qp4qk" event={"ID":"a8dac2bd-68ba-4cab-8119-051f1d14219f","Type":"ContainerStarted","Data":"32fc83d6828755e91d2656f79d98ad8aa847da994194355f64fc95a2aac6e96f"}
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.072622 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-qp4qk"
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.429111 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-qp4qk" podStartSLOduration=6.59113238 podStartE2EDuration="14.429077588s" podCreationTimestamp="2025-11-26 14:34:20 +0000 UTC" firstStartedPulling="2025-11-26 14:34:21.057364036 +0000 UTC m=+1127.854134220" lastFinishedPulling="2025-11-26 14:34:28.895309244 +0000 UTC m=+1135.692079428" observedRunningTime="2025-11-26 14:34:34.105840985 +0000 UTC m=+1140.902611169" watchObservedRunningTime="2025-11-26 14:34:34.429077588 +0000 UTC m=+1141.225847812"
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.434164 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-plzdd"]
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.436183 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.443895 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-plzdd"]
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.575053 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-utilities\") pod \"certified-operators-plzdd\" (UID: \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\") " pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.575098 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-catalog-content\") pod \"certified-operators-plzdd\" (UID: \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\") " pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.575131 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpgkh\" (UniqueName: \"kubernetes.io/projected/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-kube-api-access-gpgkh\") pod \"certified-operators-plzdd\" (UID: \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\") " pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.676225 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpgkh\" (UniqueName: \"kubernetes.io/projected/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-kube-api-access-gpgkh\") pod \"certified-operators-plzdd\" (UID: \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\") " pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.676348 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-utilities\") pod \"certified-operators-plzdd\" (UID: \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\") " pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.676370 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-catalog-content\") pod \"certified-operators-plzdd\" (UID: \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\") " pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.676874 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-catalog-content\") pod \"certified-operators-plzdd\" (UID: \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\") " pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.677058 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-utilities\") pod \"certified-operators-plzdd\" (UID: \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\") " pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.700700 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpgkh\" (UniqueName: \"kubernetes.io/projected/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-kube-api-access-gpgkh\") pod \"certified-operators-plzdd\" (UID: \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\") " pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:34 crc kubenswrapper[5037]: I1126 14:34:34.757941 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:35 crc kubenswrapper[5037]: I1126 14:34:35.196730 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-plzdd"]
Nov 26 14:34:35 crc kubenswrapper[5037]: I1126 14:34:35.813940 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-qp4qk"
Nov 26 14:34:35 crc kubenswrapper[5037]: I1126 14:34:35.884130 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-qp4qk"
Nov 26 14:34:36 crc kubenswrapper[5037]: I1126 14:34:36.084525 5037 generic.go:334] "Generic (PLEG): container finished" podID="4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" containerID="1d10cfaa2c5cd9c3402c0f6acb2d54d0ba8d84678eda76f26239eb401906cef9" exitCode=0
Nov 26 14:34:36 crc kubenswrapper[5037]: I1126 14:34:36.084587 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-plzdd" event={"ID":"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd","Type":"ContainerDied","Data":"1d10cfaa2c5cd9c3402c0f6acb2d54d0ba8d84678eda76f26239eb401906cef9"}
Nov 26 14:34:36 crc kubenswrapper[5037]: I1126 14:34:36.084640 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-plzdd" event={"ID":"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd","Type":"ContainerStarted","Data":"26106e0eb6f51bf2519a9f0ad91f37083ca610ed1e7a225d2952d9eaf73f9fd8"}
Nov 26 14:34:39 crc kubenswrapper[5037]: I1126 14:34:39.106723 5037 generic.go:334] "Generic (PLEG): container finished" podID="4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" containerID="fe8c8e7f69a8a013df330758f514a32814e9ef0ac9b168f7f429535d1096cb91" exitCode=0
Nov 26 14:34:39 crc kubenswrapper[5037]: I1126 14:34:39.106833 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-plzdd" event={"ID":"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd","Type":"ContainerDied","Data":"fe8c8e7f69a8a013df330758f514a32814e9ef0ac9b168f7f429535d1096cb91"}
Nov 26 14:34:40 crc kubenswrapper[5037]: I1126 14:34:40.115079 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-plzdd" event={"ID":"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd","Type":"ContainerStarted","Data":"bbd82e21293e2395515bb2eb4ce42d745c02a126732fc49d92acd546d0e5dcf4"}
Nov 26 14:34:40 crc kubenswrapper[5037]: I1126 14:34:40.921817 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-whwvq"
Nov 26 14:34:40 crc kubenswrapper[5037]: I1126 14:34:40.942636 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-plzdd" podStartSLOduration=3.236238047 podStartE2EDuration="6.942614387s" podCreationTimestamp="2025-11-26 14:34:34 +0000 UTC" firstStartedPulling="2025-11-26 14:34:36.086117529 +0000 UTC m=+1142.882887713" lastFinishedPulling="2025-11-26 14:34:39.792493879 +0000 UTC m=+1146.589264053" observedRunningTime="2025-11-26 14:34:40.138830735 +0000 UTC m=+1146.935600929" watchObservedRunningTime="2025-11-26 14:34:40.942614387 +0000 UTC m=+1147.739384571"
Nov 26 14:34:41 crc kubenswrapper[5037]: I1126 14:34:41.412894 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lxggf"
Nov 26 14:34:42 crc kubenswrapper[5037]: I1126 14:34:42.700743 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-mcjkn"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.139460 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"]
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.140706 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.142938 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.153435 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"]
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.320086 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwr5r\" (UniqueName: \"kubernetes.io/projected/9ec4a60b-dff4-466f-815c-881dfc3b73aa-kube-api-access-pwr5r\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb\" (UID: \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.320154 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9ec4a60b-dff4-466f-815c-881dfc3b73aa-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb\" (UID: \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.320197 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9ec4a60b-dff4-466f-815c-881dfc3b73aa-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb\" (UID: \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.421481 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9ec4a60b-dff4-466f-815c-881dfc3b73aa-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb\" (UID: \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.421598 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwr5r\" (UniqueName: \"kubernetes.io/projected/9ec4a60b-dff4-466f-815c-881dfc3b73aa-kube-api-access-pwr5r\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb\" (UID: \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.421652 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9ec4a60b-dff4-466f-815c-881dfc3b73aa-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb\" (UID: \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.422708 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9ec4a60b-dff4-466f-815c-881dfc3b73aa-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb\" (UID: \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.422835 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9ec4a60b-dff4-466f-815c-881dfc3b73aa-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb\" (UID: \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.449440 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pwr5r\" (UniqueName: \"kubernetes.io/projected/9ec4a60b-dff4-466f-815c-881dfc3b73aa-kube-api-access-pwr5r\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb\" (UID: \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.457395 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.758871 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.760705 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.810357 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:44 crc kubenswrapper[5037]: I1126 14:34:44.913431 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"]
Nov 26 14:34:44 crc kubenswrapper[5037]: W1126 14:34:44.917247 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ec4a60b_dff4_466f_815c_881dfc3b73aa.slice/crio-ad8367dc3c1b2f6ef5f21ba34af06fed9fbc82404bb905d236e2965ab6744dc2 WatchSource:0}: Error finding container ad8367dc3c1b2f6ef5f21ba34af06fed9fbc82404bb905d236e2965ab6744dc2: Status 404 returned error can't find the container with id ad8367dc3c1b2f6ef5f21ba34af06fed9fbc82404bb905d236e2965ab6744dc2
Nov 26 14:34:45 crc kubenswrapper[5037]: I1126 14:34:45.145917 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb" event={"ID":"9ec4a60b-dff4-466f-815c-881dfc3b73aa","Type":"ContainerStarted","Data":"ad8367dc3c1b2f6ef5f21ba34af06fed9fbc82404bb905d236e2965ab6744dc2"}
Nov 26 14:34:45 crc kubenswrapper[5037]: I1126 14:34:45.190862 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:46 crc kubenswrapper[5037]: I1126 14:34:46.153706 5037 generic.go:334] "Generic (PLEG): container finished" podID="9ec4a60b-dff4-466f-815c-881dfc3b73aa" containerID="d7f75b3e9826131a1b575364cb585d39aeabfcb7aad153bfdd1a22bf5e35ed15" exitCode=0
Nov 26 14:34:46 crc kubenswrapper[5037]: I1126 14:34:46.154997 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb" event={"ID":"9ec4a60b-dff4-466f-815c-881dfc3b73aa","Type":"ContainerDied","Data":"d7f75b3e9826131a1b575364cb585d39aeabfcb7aad153bfdd1a22bf5e35ed15"}
Nov 26 14:34:47 crc kubenswrapper[5037]: I1126 14:34:47.884573 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-plzdd"]
Nov 26 14:34:47 crc kubenswrapper[5037]: I1126 14:34:47.884875 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-plzdd" podUID="4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" containerName="registry-server" containerID="cri-o://bbd82e21293e2395515bb2eb4ce42d745c02a126732fc49d92acd546d0e5dcf4" gracePeriod=2
Nov 26 14:34:48 crc kubenswrapper[5037]: I1126 14:34:48.175060 5037 generic.go:334] "Generic (PLEG): container finished" podID="4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" containerID="bbd82e21293e2395515bb2eb4ce42d745c02a126732fc49d92acd546d0e5dcf4" exitCode=0
Nov 26 14:34:48 crc kubenswrapper[5037]: I1126 14:34:48.175260 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-plzdd" event={"ID":"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd","Type":"ContainerDied","Data":"bbd82e21293e2395515bb2eb4ce42d745c02a126732fc49d92acd546d0e5dcf4"}
Nov 26 14:34:48 crc kubenswrapper[5037]: I1126 14:34:48.265055 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:48 crc kubenswrapper[5037]: I1126 14:34:48.378255 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-utilities\") pod \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\" (UID: \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\") "
Nov 26 14:34:48 crc kubenswrapper[5037]: I1126 14:34:48.378367 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-catalog-content\") pod \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\" (UID: \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\") "
Nov 26 14:34:48 crc kubenswrapper[5037]: I1126 14:34:48.378436 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpgkh\" (UniqueName: \"kubernetes.io/projected/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-kube-api-access-gpgkh\") pod \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\" (UID: \"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd\") "
Nov 26 14:34:48 crc kubenswrapper[5037]: I1126 14:34:48.379417 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-utilities" (OuterVolumeSpecName: "utilities") pod "4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" (UID: "4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:34:48 crc kubenswrapper[5037]: I1126 14:34:48.397518 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-kube-api-access-gpgkh" (OuterVolumeSpecName: "kube-api-access-gpgkh") pod "4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" (UID: "4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd"). InnerVolumeSpecName "kube-api-access-gpgkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:34:48 crc kubenswrapper[5037]: I1126 14:34:48.426031 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" (UID: "4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:34:48 crc kubenswrapper[5037]: I1126 14:34:48.480434 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 14:34:48 crc kubenswrapper[5037]: I1126 14:34:48.480468 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 14:34:48 crc kubenswrapper[5037]: I1126 14:34:48.480479 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpgkh\" (UniqueName: \"kubernetes.io/projected/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd-kube-api-access-gpgkh\") on node \"crc\" DevicePath \"\""
Nov 26 14:34:49 crc kubenswrapper[5037]: I1126 14:34:49.185186 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-plzdd" event={"ID":"4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd","Type":"ContainerDied","Data":"26106e0eb6f51bf2519a9f0ad91f37083ca610ed1e7a225d2952d9eaf73f9fd8"}
Nov 26 14:34:49 crc kubenswrapper[5037]: I1126 14:34:49.185426 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-plzdd"
Nov 26 14:34:49 crc kubenswrapper[5037]: I1126 14:34:49.185597 5037 scope.go:117] "RemoveContainer" containerID="bbd82e21293e2395515bb2eb4ce42d745c02a126732fc49d92acd546d0e5dcf4"
Nov 26 14:34:49 crc kubenswrapper[5037]: I1126 14:34:49.221448 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-plzdd"]
Nov 26 14:34:49 crc kubenswrapper[5037]: I1126 14:34:49.230639 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-plzdd"]
Nov 26 14:34:49 crc kubenswrapper[5037]: I1126 14:34:49.887016 5037 scope.go:117] "RemoveContainer" containerID="fe8c8e7f69a8a013df330758f514a32814e9ef0ac9b168f7f429535d1096cb91"
Nov 26 14:34:49 crc kubenswrapper[5037]: I1126 14:34:49.918940 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" path="/var/lib/kubelet/pods/4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd/volumes"
Nov 26 14:34:50 crc kubenswrapper[5037]: I1126 14:34:50.078455 5037 scope.go:117] "RemoveContainer" containerID="1d10cfaa2c5cd9c3402c0f6acb2d54d0ba8d84678eda76f26239eb401906cef9"
Nov 26 14:34:50 crc kubenswrapper[5037]: I1126 14:34:50.816724 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-qp4qk"
Nov 26 14:34:51 crc kubenswrapper[5037]: I1126 14:34:51.199819 5037 generic.go:334] "Generic (PLEG): container finished" podID="9ec4a60b-dff4-466f-815c-881dfc3b73aa" containerID="c50afa2b876146c6157b39afce0b3800fb25b3b98353ae98545510c51cbd690e" exitCode=0
Nov 26 14:34:51 crc kubenswrapper[5037]: I1126 14:34:51.199858 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb" event={"ID":"9ec4a60b-dff4-466f-815c-881dfc3b73aa","Type":"ContainerDied","Data":"c50afa2b876146c6157b39afce0b3800fb25b3b98353ae98545510c51cbd690e"}
Nov 26 14:34:52 crc kubenswrapper[5037]: I1126 14:34:52.223520 5037 generic.go:334] "Generic (PLEG): container finished" podID="9ec4a60b-dff4-466f-815c-881dfc3b73aa" containerID="481847bc0695edbe33329a195b3c007d2cb60eff5183a0cdf4f62c59280dc1ba" exitCode=0
Nov 26 14:34:52 crc kubenswrapper[5037]: I1126 14:34:52.223606 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb" event={"ID":"9ec4a60b-dff4-466f-815c-881dfc3b73aa","Type":"ContainerDied","Data":"481847bc0695edbe33329a195b3c007d2cb60eff5183a0cdf4f62c59280dc1ba"}
Nov 26 14:34:53 crc kubenswrapper[5037]: I1126 14:34:53.496369 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"
Nov 26 14:34:53 crc kubenswrapper[5037]: I1126 14:34:53.659539 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9ec4a60b-dff4-466f-815c-881dfc3b73aa-util\") pod \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\" (UID: \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\") "
Nov 26 14:34:53 crc kubenswrapper[5037]: I1126 14:34:53.659793 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9ec4a60b-dff4-466f-815c-881dfc3b73aa-bundle\") pod \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\" (UID: \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\") "
Nov 26 14:34:53 crc kubenswrapper[5037]: I1126 14:34:53.659975 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pwr5r\" (UniqueName: \"kubernetes.io/projected/9ec4a60b-dff4-466f-815c-881dfc3b73aa-kube-api-access-pwr5r\") pod \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\" (UID: \"9ec4a60b-dff4-466f-815c-881dfc3b73aa\") "
Nov 26 14:34:53 crc kubenswrapper[5037]: I1126 14:34:53.660836 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ec4a60b-dff4-466f-815c-881dfc3b73aa-bundle" (OuterVolumeSpecName: "bundle") pod "9ec4a60b-dff4-466f-815c-881dfc3b73aa" (UID: "9ec4a60b-dff4-466f-815c-881dfc3b73aa"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:34:53 crc kubenswrapper[5037]: I1126 14:34:53.669913 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ec4a60b-dff4-466f-815c-881dfc3b73aa-util" (OuterVolumeSpecName: "util") pod "9ec4a60b-dff4-466f-815c-881dfc3b73aa" (UID: "9ec4a60b-dff4-466f-815c-881dfc3b73aa"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:34:53 crc kubenswrapper[5037]: I1126 14:34:53.673420 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ec4a60b-dff4-466f-815c-881dfc3b73aa-kube-api-access-pwr5r" (OuterVolumeSpecName: "kube-api-access-pwr5r") pod "9ec4a60b-dff4-466f-815c-881dfc3b73aa" (UID: "9ec4a60b-dff4-466f-815c-881dfc3b73aa"). InnerVolumeSpecName "kube-api-access-pwr5r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:34:53 crc kubenswrapper[5037]: I1126 14:34:53.761745 5037 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9ec4a60b-dff4-466f-815c-881dfc3b73aa-util\") on node \"crc\" DevicePath \"\""
Nov 26 14:34:53 crc kubenswrapper[5037]: I1126 14:34:53.761795 5037 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9ec4a60b-dff4-466f-815c-881dfc3b73aa-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 14:34:53 crc kubenswrapper[5037]: I1126 14:34:53.761809 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pwr5r\" (UniqueName: \"kubernetes.io/projected/9ec4a60b-dff4-466f-815c-881dfc3b73aa-kube-api-access-pwr5r\") on node \"crc\" DevicePath \"\""
Nov 26 14:34:54 crc kubenswrapper[5037]: I1126 14:34:54.239369 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb" event={"ID":"9ec4a60b-dff4-466f-815c-881dfc3b73aa","Type":"ContainerDied","Data":"ad8367dc3c1b2f6ef5f21ba34af06fed9fbc82404bb905d236e2965ab6744dc2"}
Nov 26 14:34:54 crc kubenswrapper[5037]: I1126 14:34:54.239701 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad8367dc3c1b2f6ef5f21ba34af06fed9fbc82404bb905d236e2965ab6744dc2"
Nov 26 14:34:54 crc kubenswrapper[5037]: I1126 14:34:54.239460 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.660488 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk"]
Nov 26 14:35:02 crc kubenswrapper[5037]: E1126 14:35:02.661222 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ec4a60b-dff4-466f-815c-881dfc3b73aa" containerName="util"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.661234 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ec4a60b-dff4-466f-815c-881dfc3b73aa" containerName="util"
Nov 26 14:35:02 crc kubenswrapper[5037]: E1126 14:35:02.661243 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ec4a60b-dff4-466f-815c-881dfc3b73aa" containerName="extract"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.661249 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ec4a60b-dff4-466f-815c-881dfc3b73aa" containerName="extract"
Nov 26 14:35:02 crc kubenswrapper[5037]: E1126 14:35:02.661259 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" containerName="extract-content"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.661266 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" containerName="extract-content"
Nov 26 14:35:02 crc kubenswrapper[5037]: E1126 14:35:02.661278 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" containerName="extract-utilities"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.661300 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" containerName="extract-utilities"
Nov 26 14:35:02 crc kubenswrapper[5037]: E1126 14:35:02.661315 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ec4a60b-dff4-466f-815c-881dfc3b73aa" containerName="pull"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.661321 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ec4a60b-dff4-466f-815c-881dfc3b73aa" containerName="pull"
Nov 26 14:35:02 crc kubenswrapper[5037]: E1126 14:35:02.661334 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" containerName="registry-server"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.661340 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" containerName="registry-server"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.661431 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d3aac5b-ee91-4ccd-b0fa-b33740e2c7cd" containerName="registry-server"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.661443 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ec4a60b-dff4-466f-815c-881dfc3b73aa" containerName="extract"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.661848 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.663808 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.663946 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.663808 5037 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-85wkg"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.681504 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk"]
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.783446 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tm4q4\" (UniqueName: \"kubernetes.io/projected/3c5f32c0-544d-4f16-bb6d-6c81029660ae-kube-api-access-tm4q4\") pod \"cert-manager-operator-controller-manager-64cf6dff88-rwkdk\" (UID: \"3c5f32c0-544d-4f16-bb6d-6c81029660ae\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.783513 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/3c5f32c0-544d-4f16-bb6d-6c81029660ae-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-rwkdk\" (UID: \"3c5f32c0-544d-4f16-bb6d-6c81029660ae\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.884507 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/3c5f32c0-544d-4f16-bb6d-6c81029660ae-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-rwkdk\" (UID: \"3c5f32c0-544d-4f16-bb6d-6c81029660ae\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.884908 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tm4q4\" (UniqueName: \"kubernetes.io/projected/3c5f32c0-544d-4f16-bb6d-6c81029660ae-kube-api-access-tm4q4\") pod \"cert-manager-operator-controller-manager-64cf6dff88-rwkdk\" (UID: \"3c5f32c0-544d-4f16-bb6d-6c81029660ae\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.885590 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/3c5f32c0-544d-4f16-bb6d-6c81029660ae-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-rwkdk\" (UID: \"3c5f32c0-544d-4f16-bb6d-6c81029660ae\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.904112 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tm4q4\" (UniqueName: \"kubernetes.io/projected/3c5f32c0-544d-4f16-bb6d-6c81029660ae-kube-api-access-tm4q4\") pod \"cert-manager-operator-controller-manager-64cf6dff88-rwkdk\" (UID: \"3c5f32c0-544d-4f16-bb6d-6c81029660ae\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk"
Nov 26 14:35:02 crc kubenswrapper[5037]: I1126 14:35:02.980332 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk"
Nov 26 14:35:03 crc kubenswrapper[5037]: I1126 14:35:03.359933 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk"]
Nov 26 14:35:04 crc kubenswrapper[5037]: I1126 14:35:04.298255 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk" event={"ID":"3c5f32c0-544d-4f16-bb6d-6c81029660ae","Type":"ContainerStarted","Data":"037a753eade58fa6e1c5f64a501c0d56cc91005c4e3007182cdadc4c39973938"}
Nov 26 14:35:08 crc kubenswrapper[5037]: I1126 14:35:08.357208 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5rq42"]
Nov 26 14:35:08 crc kubenswrapper[5037]: I1126 14:35:08.358614 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:08 crc kubenswrapper[5037]: I1126 14:35:08.381602 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5rq42"]
Nov 26 14:35:08 crc kubenswrapper[5037]: I1126 14:35:08.471038 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffkw7\" (UniqueName: \"kubernetes.io/projected/0a146e4e-9143-4339-a5dd-99456ee68d8d-kube-api-access-ffkw7\") pod \"community-operators-5rq42\" (UID: \"0a146e4e-9143-4339-a5dd-99456ee68d8d\") " pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:08 crc kubenswrapper[5037]: I1126 14:35:08.471094 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a146e4e-9143-4339-a5dd-99456ee68d8d-catalog-content\") pod \"community-operators-5rq42\" (UID: \"0a146e4e-9143-4339-a5dd-99456ee68d8d\") " pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:08 crc kubenswrapper[5037]: I1126 14:35:08.471149 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a146e4e-9143-4339-a5dd-99456ee68d8d-utilities\") pod \"community-operators-5rq42\" (UID: \"0a146e4e-9143-4339-a5dd-99456ee68d8d\") " pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:08 crc kubenswrapper[5037]: I1126 14:35:08.572809 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a146e4e-9143-4339-a5dd-99456ee68d8d-catalog-content\") pod \"community-operators-5rq42\" (UID: \"0a146e4e-9143-4339-a5dd-99456ee68d8d\") " pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:08 crc kubenswrapper[5037]: I1126 14:35:08.572897 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a146e4e-9143-4339-a5dd-99456ee68d8d-utilities\") pod \"community-operators-5rq42\" (UID: \"0a146e4e-9143-4339-a5dd-99456ee68d8d\") " pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:08 crc kubenswrapper[5037]: I1126 14:35:08.572984 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffkw7\" (UniqueName: \"kubernetes.io/projected/0a146e4e-9143-4339-a5dd-99456ee68d8d-kube-api-access-ffkw7\") pod \"community-operators-5rq42\" (UID: \"0a146e4e-9143-4339-a5dd-99456ee68d8d\") " pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:08 crc kubenswrapper[5037]: I1126 14:35:08.573550 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a146e4e-9143-4339-a5dd-99456ee68d8d-utilities\") pod \"community-operators-5rq42\" (UID: \"0a146e4e-9143-4339-a5dd-99456ee68d8d\") " pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:08 crc kubenswrapper[5037]: I1126 14:35:08.573932 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a146e4e-9143-4339-a5dd-99456ee68d8d-catalog-content\") pod \"community-operators-5rq42\" (UID: \"0a146e4e-9143-4339-a5dd-99456ee68d8d\") " pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:08 crc kubenswrapper[5037]: I1126 14:35:08.596413 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffkw7\" (UniqueName: \"kubernetes.io/projected/0a146e4e-9143-4339-a5dd-99456ee68d8d-kube-api-access-ffkw7\") pod \"community-operators-5rq42\" (UID: \"0a146e4e-9143-4339-a5dd-99456ee68d8d\") " pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:08 crc kubenswrapper[5037]: I1126 14:35:08.676693 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:10 crc kubenswrapper[5037]: I1126 14:35:10.785377 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5rq42"]
Nov 26 14:35:11 crc kubenswrapper[5037]: I1126 14:35:11.347460 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk" event={"ID":"3c5f32c0-544d-4f16-bb6d-6c81029660ae","Type":"ContainerStarted","Data":"43063d3d2ed913dbcc4b3c64cf25e75c4c58b167ac2d53b78d66f1ec11e6698f"}
Nov 26 14:35:11 crc kubenswrapper[5037]: I1126 14:35:11.350258 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5rq42" event={"ID":"0a146e4e-9143-4339-a5dd-99456ee68d8d","Type":"ContainerDied","Data":"84ffdcda98a3daeadc43696bd71213042440601a70d7e08d4227181371fa90a9"}
Nov 26 14:35:11 crc kubenswrapper[5037]: I1126 14:35:11.350183 5037 generic.go:334] "Generic (PLEG): container finished" podID="0a146e4e-9143-4339-a5dd-99456ee68d8d" containerID="84ffdcda98a3daeadc43696bd71213042440601a70d7e08d4227181371fa90a9" exitCode=0
Nov 26 14:35:11 crc kubenswrapper[5037]: I1126 14:35:11.350437 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5rq42" event={"ID":"0a146e4e-9143-4339-a5dd-99456ee68d8d","Type":"ContainerStarted","Data":"98886218bddb657b486685e5cc7ce947101013ec6bdaf592e63171e9d8ab6334"}
Nov 26 14:35:11 crc kubenswrapper[5037]: I1126 14:35:11.398862 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-rwkdk" podStartSLOduration=2.094988702 podStartE2EDuration="9.398818984s" podCreationTimestamp="2025-11-26 14:35:02 +0000 UTC" firstStartedPulling="2025-11-26 14:35:03.367478602 +0000 UTC m=+1170.164248796" lastFinishedPulling="2025-11-26 14:35:10.671308874 +0000 UTC m=+1177.468079078" observedRunningTime="2025-11-26 14:35:11.386983796 +0000 UTC m=+1178.183754010" watchObservedRunningTime="2025-11-26 14:35:11.398818984 +0000 UTC m=+1178.195589198"
Nov 26 14:35:13 crc kubenswrapper[5037]: I1126 14:35:13.366940 5037 generic.go:334] "Generic (PLEG): container finished" podID="0a146e4e-9143-4339-a5dd-99456ee68d8d" containerID="1f47654af340cae4c4487da5b2d39a7ece41b264bd8e9852e3a2b0208fbbd717" exitCode=0
Nov 26 14:35:13 crc kubenswrapper[5037]: I1126 14:35:13.367176 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5rq42" event={"ID":"0a146e4e-9143-4339-a5dd-99456ee68d8d","Type":"ContainerDied","Data":"1f47654af340cae4c4487da5b2d39a7ece41b264bd8e9852e3a2b0208fbbd717"}
Nov 26 14:35:15 crc kubenswrapper[5037]: I1126 14:35:15.384990 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5rq42" event={"ID":"0a146e4e-9143-4339-a5dd-99456ee68d8d","Type":"ContainerStarted","Data":"3ef238b98445175b5e5f59fb4be0de9b2bab4b5769d32d5f238d39eeeb7010c3"}
Nov 26 14:35:15 crc kubenswrapper[5037]: I1126 14:35:15.406818 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5rq42" podStartSLOduration=3.972923238 podStartE2EDuration="7.406800758s" podCreationTimestamp="2025-11-26 14:35:08 +0000 UTC" firstStartedPulling="2025-11-26 14:35:11.352570465 +0000 UTC m=+1178.149340679" lastFinishedPulling="2025-11-26 14:35:14.786448015 +0000 UTC m=+1181.583218199" observedRunningTime="2025-11-26 14:35:15.402422281 +0000 UTC m=+1182.199192465" watchObservedRunningTime="2025-11-26 14:35:15.406800758 +0000 UTC m=+1182.203570942"
Nov 26 14:35:17 crc kubenswrapper[5037]: I1126 14:35:17.962511 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-dvzww"]
Nov 26 14:35:17 crc kubenswrapper[5037]: I1126 14:35:17.963680 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-dvzww"
Nov 26 14:35:17 crc kubenswrapper[5037]: I1126 14:35:17.966739 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Nov 26 14:35:17 crc kubenswrapper[5037]: I1126 14:35:17.966825 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Nov 26 14:35:17 crc kubenswrapper[5037]: I1126 14:35:17.966843 5037 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-dhzkb"
Nov 26 14:35:17 crc kubenswrapper[5037]: I1126 14:35:17.976012 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-dvzww"]
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.140762 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-79fjl"]
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.141966 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-79fjl"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.145733 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2696cbc1-003d-4c5b-9346-2f5434393ed7-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-dvzww\" (UID: \"2696cbc1-003d-4c5b-9346-2f5434393ed7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-dvzww"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.145830 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5k8x\" (UniqueName: \"kubernetes.io/projected/2696cbc1-003d-4c5b-9346-2f5434393ed7-kube-api-access-g5k8x\") pod \"cert-manager-cainjector-855d9ccff4-dvzww\" (UID: \"2696cbc1-003d-4c5b-9346-2f5434393ed7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-dvzww"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.158734 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-79fjl"]
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.248632 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4b6m\" (UniqueName: \"kubernetes.io/projected/5acef117-b98a-4445-aef3-eeeaf895e664-kube-api-access-h4b6m\") pod \"redhat-marketplace-79fjl\" (UID: \"5acef117-b98a-4445-aef3-eeeaf895e664\") " pod="openshift-marketplace/redhat-marketplace-79fjl"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.248725 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2696cbc1-003d-4c5b-9346-2f5434393ed7-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-dvzww\" (UID: \"2696cbc1-003d-4c5b-9346-2f5434393ed7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-dvzww"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.248794 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5acef117-b98a-4445-aef3-eeeaf895e664-utilities\") pod \"redhat-marketplace-79fjl\" (UID: \"5acef117-b98a-4445-aef3-eeeaf895e664\") " pod="openshift-marketplace/redhat-marketplace-79fjl"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.248841 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5acef117-b98a-4445-aef3-eeeaf895e664-catalog-content\") pod \"redhat-marketplace-79fjl\" (UID: \"5acef117-b98a-4445-aef3-eeeaf895e664\") " pod="openshift-marketplace/redhat-marketplace-79fjl"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.248877 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5k8x\" (UniqueName: \"kubernetes.io/projected/2696cbc1-003d-4c5b-9346-2f5434393ed7-kube-api-access-g5k8x\") pod \"cert-manager-cainjector-855d9ccff4-dvzww\" (UID: \"2696cbc1-003d-4c5b-9346-2f5434393ed7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-dvzww"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.267779 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5k8x\" (UniqueName: \"kubernetes.io/projected/2696cbc1-003d-4c5b-9346-2f5434393ed7-kube-api-access-g5k8x\") pod \"cert-manager-cainjector-855d9ccff4-dvzww\" (UID: \"2696cbc1-003d-4c5b-9346-2f5434393ed7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-dvzww"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.268689 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2696cbc1-003d-4c5b-9346-2f5434393ed7-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-dvzww\" (UID: \"2696cbc1-003d-4c5b-9346-2f5434393ed7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-dvzww"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.332825 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-dvzww"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.350711 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5acef117-b98a-4445-aef3-eeeaf895e664-utilities\") pod \"redhat-marketplace-79fjl\" (UID: \"5acef117-b98a-4445-aef3-eeeaf895e664\") " pod="openshift-marketplace/redhat-marketplace-79fjl"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.350757 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5acef117-b98a-4445-aef3-eeeaf895e664-catalog-content\") pod \"redhat-marketplace-79fjl\" (UID: \"5acef117-b98a-4445-aef3-eeeaf895e664\") " pod="openshift-marketplace/redhat-marketplace-79fjl"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.350823 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4b6m\" (UniqueName: \"kubernetes.io/projected/5acef117-b98a-4445-aef3-eeeaf895e664-kube-api-access-h4b6m\") pod \"redhat-marketplace-79fjl\" (UID: \"5acef117-b98a-4445-aef3-eeeaf895e664\") " pod="openshift-marketplace/redhat-marketplace-79fjl"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.351473 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5acef117-b98a-4445-aef3-eeeaf895e664-utilities\") pod \"redhat-marketplace-79fjl\" (UID: \"5acef117-b98a-4445-aef3-eeeaf895e664\") " pod="openshift-marketplace/redhat-marketplace-79fjl"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.351489 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5acef117-b98a-4445-aef3-eeeaf895e664-catalog-content\") pod \"redhat-marketplace-79fjl\" (UID: \"5acef117-b98a-4445-aef3-eeeaf895e664\") " pod="openshift-marketplace/redhat-marketplace-79fjl"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.380068 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4b6m\" (UniqueName: \"kubernetes.io/projected/5acef117-b98a-4445-aef3-eeeaf895e664-kube-api-access-h4b6m\") pod \"redhat-marketplace-79fjl\" (UID: \"5acef117-b98a-4445-aef3-eeeaf895e664\") " pod="openshift-marketplace/redhat-marketplace-79fjl"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.460380 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-79fjl"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.677566 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.677932 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.717209 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.791839 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-dvzww"]
Nov 26 14:35:18 crc kubenswrapper[5037]: I1126 14:35:18.899248 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-79fjl"]
Nov 26 14:35:18 crc kubenswrapper[5037]: W1126 14:35:18.906878 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5acef117_b98a_4445_aef3_eeeaf895e664.slice/crio-79fcbe30754226e99ad1ae720ad8213457d9ad295f66553904bbdd949d604a06 WatchSource:0}: Error finding container 79fcbe30754226e99ad1ae720ad8213457d9ad295f66553904bbdd949d604a06: Status 404 returned error can't find the container with id 79fcbe30754226e99ad1ae720ad8213457d9ad295f66553904bbdd949d604a06
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.415461 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-dvzww" event={"ID":"2696cbc1-003d-4c5b-9346-2f5434393ed7","Type":"ContainerStarted","Data":"cccc424f5fd70b0c5c2e9c1ed8c58172f0ddfc00ffd5693ed4008ae050f3c89b"}
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.416913 5037 generic.go:334] "Generic (PLEG): container finished" podID="5acef117-b98a-4445-aef3-eeeaf895e664" containerID="5b2b71c019cdc9b83908dc4263f71d737a6e47f3080249fed9cfe9866b125717" exitCode=0
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.416979 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-79fjl" event={"ID":"5acef117-b98a-4445-aef3-eeeaf895e664","Type":"ContainerDied","Data":"5b2b71c019cdc9b83908dc4263f71d737a6e47f3080249fed9cfe9866b125717"}
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.417012 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-79fjl" event={"ID":"5acef117-b98a-4445-aef3-eeeaf895e664","Type":"ContainerStarted","Data":"79fcbe30754226e99ad1ae720ad8213457d9ad295f66553904bbdd949d604a06"}
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.473524 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.593916 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-4j52c"]
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.594710 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-4j52c"
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.598230 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-4j52c"]
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.598789 5037 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-g9lg4"
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.771388 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4mnf\" (UniqueName: \"kubernetes.io/projected/fcdd281f-2196-4aa1-992a-5b275246be42-kube-api-access-r4mnf\") pod \"cert-manager-webhook-f4fb5df64-4j52c\" (UID: \"fcdd281f-2196-4aa1-992a-5b275246be42\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-4j52c"
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.771596 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fcdd281f-2196-4aa1-992a-5b275246be42-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-4j52c\" (UID: \"fcdd281f-2196-4aa1-992a-5b275246be42\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-4j52c"
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.873313 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fcdd281f-2196-4aa1-992a-5b275246be42-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-4j52c\" (UID: \"fcdd281f-2196-4aa1-992a-5b275246be42\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-4j52c"
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.873371 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4mnf\" (UniqueName: \"kubernetes.io/projected/fcdd281f-2196-4aa1-992a-5b275246be42-kube-api-access-r4mnf\") pod \"cert-manager-webhook-f4fb5df64-4j52c\" (UID: \"fcdd281f-2196-4aa1-992a-5b275246be42\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-4j52c"
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.898893 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fcdd281f-2196-4aa1-992a-5b275246be42-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-4j52c\" (UID: \"fcdd281f-2196-4aa1-992a-5b275246be42\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-4j52c"
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.900047 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4mnf\" (UniqueName: \"kubernetes.io/projected/fcdd281f-2196-4aa1-992a-5b275246be42-kube-api-access-r4mnf\") pod \"cert-manager-webhook-f4fb5df64-4j52c\" (UID: \"fcdd281f-2196-4aa1-992a-5b275246be42\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-4j52c"
Nov 26 14:35:19 crc kubenswrapper[5037]: I1126 14:35:19.912825 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-4j52c"
Nov 26 14:35:20 crc kubenswrapper[5037]: I1126 14:35:20.381836 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-4j52c"]
Nov 26 14:35:20 crc kubenswrapper[5037]: I1126 14:35:20.430579 5037 generic.go:334] "Generic (PLEG): container finished" podID="5acef117-b98a-4445-aef3-eeeaf895e664" containerID="b510ddff5c44e97ea8e9e166cc9cd9bc1736e4c702a5128b1c907df5d351db35" exitCode=0
Nov 26 14:35:20 crc kubenswrapper[5037]: I1126 14:35:20.430677 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-79fjl" event={"ID":"5acef117-b98a-4445-aef3-eeeaf895e664","Type":"ContainerDied","Data":"b510ddff5c44e97ea8e9e166cc9cd9bc1736e4c702a5128b1c907df5d351db35"}
Nov 26 14:35:20 crc kubenswrapper[5037]: I1126 14:35:20.734394 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5rq42"]
Nov 26 14:35:21 crc kubenswrapper[5037]: I1126 14:35:21.437460 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-4j52c" event={"ID":"fcdd281f-2196-4aa1-992a-5b275246be42","Type":"ContainerStarted","Data":"3c488bdf639abbbd721bd20da17673cfb131a85fb7c89dc88781a50ff54f205b"}
Nov 26 14:35:21 crc kubenswrapper[5037]: I1126 14:35:21.441463 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-79fjl" event={"ID":"5acef117-b98a-4445-aef3-eeeaf895e664","Type":"ContainerStarted","Data":"9a08861762b5e31ded23ea9caf6a5d8728b332327147c1f4622894304dea7a4f"}
Nov 26 14:35:21 crc kubenswrapper[5037]: I1126 14:35:21.441635 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5rq42" podUID="0a146e4e-9143-4339-a5dd-99456ee68d8d" containerName="registry-server" containerID="cri-o://3ef238b98445175b5e5f59fb4be0de9b2bab4b5769d32d5f238d39eeeb7010c3" gracePeriod=2
Nov 26 14:35:21 crc kubenswrapper[5037]: I1126 14:35:21.463279 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-79fjl" podStartSLOduration=1.73444426 podStartE2EDuration="3.46326113s" podCreationTimestamp="2025-11-26 14:35:18 +0000 UTC" firstStartedPulling="2025-11-26 14:35:19.419028815 +0000 UTC m=+1186.215798999" lastFinishedPulling="2025-11-26 14:35:21.147845685 +0000 UTC m=+1187.944615869" observedRunningTime="2025-11-26 14:35:21.459585171 +0000 UTC m=+1188.256355395" watchObservedRunningTime="2025-11-26 14:35:21.46326113 +0000 UTC m=+1188.260031314"
Nov 26 14:35:21 crc kubenswrapper[5037]: I1126 14:35:21.857885 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5rq42"
Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.008342 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a146e4e-9143-4339-a5dd-99456ee68d8d-utilities\") pod \"0a146e4e-9143-4339-a5dd-99456ee68d8d\" (UID: \"0a146e4e-9143-4339-a5dd-99456ee68d8d\") "
Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.008781 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ffkw7\" (UniqueName: \"kubernetes.io/projected/0a146e4e-9143-4339-a5dd-99456ee68d8d-kube-api-access-ffkw7\") pod \"0a146e4e-9143-4339-a5dd-99456ee68d8d\" (UID: \"0a146e4e-9143-4339-a5dd-99456ee68d8d\") "
Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.008841 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a146e4e-9143-4339-a5dd-99456ee68d8d-catalog-content\") pod \"0a146e4e-9143-4339-a5dd-99456ee68d8d\" (UID: \"0a146e4e-9143-4339-a5dd-99456ee68d8d\") "
Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.009694 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a146e4e-9143-4339-a5dd-99456ee68d8d-utilities" (OuterVolumeSpecName: "utilities") pod "0a146e4e-9143-4339-a5dd-99456ee68d8d" (UID: "0a146e4e-9143-4339-a5dd-99456ee68d8d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.014600 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a146e4e-9143-4339-a5dd-99456ee68d8d-kube-api-access-ffkw7" (OuterVolumeSpecName: "kube-api-access-ffkw7") pod "0a146e4e-9143-4339-a5dd-99456ee68d8d" (UID: "0a146e4e-9143-4339-a5dd-99456ee68d8d"). InnerVolumeSpecName "kube-api-access-ffkw7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.082032 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a146e4e-9143-4339-a5dd-99456ee68d8d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0a146e4e-9143-4339-a5dd-99456ee68d8d" (UID: "0a146e4e-9143-4339-a5dd-99456ee68d8d"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.112064 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ffkw7\" (UniqueName: \"kubernetes.io/projected/0a146e4e-9143-4339-a5dd-99456ee68d8d-kube-api-access-ffkw7\") on node \"crc\" DevicePath \"\"" Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.112099 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a146e4e-9143-4339-a5dd-99456ee68d8d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.112108 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a146e4e-9143-4339-a5dd-99456ee68d8d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.457949 5037 generic.go:334] "Generic (PLEG): container finished" podID="0a146e4e-9143-4339-a5dd-99456ee68d8d" containerID="3ef238b98445175b5e5f59fb4be0de9b2bab4b5769d32d5f238d39eeeb7010c3" exitCode=0 Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.459081 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5rq42" Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.460194 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5rq42" event={"ID":"0a146e4e-9143-4339-a5dd-99456ee68d8d","Type":"ContainerDied","Data":"3ef238b98445175b5e5f59fb4be0de9b2bab4b5769d32d5f238d39eeeb7010c3"} Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.460307 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5rq42" event={"ID":"0a146e4e-9143-4339-a5dd-99456ee68d8d","Type":"ContainerDied","Data":"98886218bddb657b486685e5cc7ce947101013ec6bdaf592e63171e9d8ab6334"} Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.460332 5037 scope.go:117] "RemoveContainer" containerID="3ef238b98445175b5e5f59fb4be0de9b2bab4b5769d32d5f238d39eeeb7010c3" Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.487179 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5rq42"] Nov 26 14:35:22 crc kubenswrapper[5037]: I1126 14:35:22.491594 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5rq42"] Nov 26 14:35:23 crc kubenswrapper[5037]: I1126 14:35:23.918465 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a146e4e-9143-4339-a5dd-99456ee68d8d" path="/var/lib/kubelet/pods/0a146e4e-9143-4339-a5dd-99456ee68d8d/volumes" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.494406 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-dgkx5"] Nov 26 14:35:25 crc kubenswrapper[5037]: E1126 14:35:25.494639 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a146e4e-9143-4339-a5dd-99456ee68d8d" containerName="extract-content" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.494652 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a146e4e-9143-4339-a5dd-99456ee68d8d" containerName="extract-content" Nov 26 14:35:25 crc kubenswrapper[5037]: E1126 14:35:25.494662 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a146e4e-9143-4339-a5dd-99456ee68d8d" containerName="extract-utilities" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 
14:35:25.494669 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a146e4e-9143-4339-a5dd-99456ee68d8d" containerName="extract-utilities" Nov 26 14:35:25 crc kubenswrapper[5037]: E1126 14:35:25.494679 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a146e4e-9143-4339-a5dd-99456ee68d8d" containerName="registry-server" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.494685 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a146e4e-9143-4339-a5dd-99456ee68d8d" containerName="registry-server" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.494801 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a146e4e-9143-4339-a5dd-99456ee68d8d" containerName="registry-server" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.495233 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-dgkx5" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.499431 5037 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-55bnw" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.515940 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-dgkx5"] Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.574140 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5301fea1-84e2-4c3c-abdc-dc6464184277-bound-sa-token\") pod \"cert-manager-86cb77c54b-dgkx5\" (UID: \"5301fea1-84e2-4c3c-abdc-dc6464184277\") " pod="cert-manager/cert-manager-86cb77c54b-dgkx5" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.574200 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgtf2\" (UniqueName: \"kubernetes.io/projected/5301fea1-84e2-4c3c-abdc-dc6464184277-kube-api-access-cgtf2\") pod \"cert-manager-86cb77c54b-dgkx5\" (UID: \"5301fea1-84e2-4c3c-abdc-dc6464184277\") " pod="cert-manager/cert-manager-86cb77c54b-dgkx5" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.675511 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5301fea1-84e2-4c3c-abdc-dc6464184277-bound-sa-token\") pod \"cert-manager-86cb77c54b-dgkx5\" (UID: \"5301fea1-84e2-4c3c-abdc-dc6464184277\") " pod="cert-manager/cert-manager-86cb77c54b-dgkx5" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.675635 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgtf2\" (UniqueName: \"kubernetes.io/projected/5301fea1-84e2-4c3c-abdc-dc6464184277-kube-api-access-cgtf2\") pod \"cert-manager-86cb77c54b-dgkx5\" (UID: \"5301fea1-84e2-4c3c-abdc-dc6464184277\") " pod="cert-manager/cert-manager-86cb77c54b-dgkx5" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.710229 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5301fea1-84e2-4c3c-abdc-dc6464184277-bound-sa-token\") pod \"cert-manager-86cb77c54b-dgkx5\" (UID: \"5301fea1-84e2-4c3c-abdc-dc6464184277\") " pod="cert-manager/cert-manager-86cb77c54b-dgkx5" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.720835 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgtf2\" (UniqueName: 
\"kubernetes.io/projected/5301fea1-84e2-4c3c-abdc-dc6464184277-kube-api-access-cgtf2\") pod \"cert-manager-86cb77c54b-dgkx5\" (UID: \"5301fea1-84e2-4c3c-abdc-dc6464184277\") " pod="cert-manager/cert-manager-86cb77c54b-dgkx5" Nov 26 14:35:25 crc kubenswrapper[5037]: I1126 14:35:25.824885 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-dgkx5" Nov 26 14:35:27 crc kubenswrapper[5037]: I1126 14:35:27.565725 5037 scope.go:117] "RemoveContainer" containerID="1f47654af340cae4c4487da5b2d39a7ece41b264bd8e9852e3a2b0208fbbd717" Nov 26 14:35:27 crc kubenswrapper[5037]: I1126 14:35:27.587553 5037 scope.go:117] "RemoveContainer" containerID="84ffdcda98a3daeadc43696bd71213042440601a70d7e08d4227181371fa90a9" Nov 26 14:35:27 crc kubenswrapper[5037]: I1126 14:35:27.695851 5037 scope.go:117] "RemoveContainer" containerID="3ef238b98445175b5e5f59fb4be0de9b2bab4b5769d32d5f238d39eeeb7010c3" Nov 26 14:35:27 crc kubenswrapper[5037]: E1126 14:35:27.696373 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ef238b98445175b5e5f59fb4be0de9b2bab4b5769d32d5f238d39eeeb7010c3\": container with ID starting with 3ef238b98445175b5e5f59fb4be0de9b2bab4b5769d32d5f238d39eeeb7010c3 not found: ID does not exist" containerID="3ef238b98445175b5e5f59fb4be0de9b2bab4b5769d32d5f238d39eeeb7010c3" Nov 26 14:35:27 crc kubenswrapper[5037]: I1126 14:35:27.696418 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ef238b98445175b5e5f59fb4be0de9b2bab4b5769d32d5f238d39eeeb7010c3"} err="failed to get container status \"3ef238b98445175b5e5f59fb4be0de9b2bab4b5769d32d5f238d39eeeb7010c3\": rpc error: code = NotFound desc = could not find container \"3ef238b98445175b5e5f59fb4be0de9b2bab4b5769d32d5f238d39eeeb7010c3\": container with ID starting with 3ef238b98445175b5e5f59fb4be0de9b2bab4b5769d32d5f238d39eeeb7010c3 not found: ID does not exist" Nov 26 14:35:27 crc kubenswrapper[5037]: I1126 14:35:27.696447 5037 scope.go:117] "RemoveContainer" containerID="1f47654af340cae4c4487da5b2d39a7ece41b264bd8e9852e3a2b0208fbbd717" Nov 26 14:35:27 crc kubenswrapper[5037]: E1126 14:35:27.696927 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f47654af340cae4c4487da5b2d39a7ece41b264bd8e9852e3a2b0208fbbd717\": container with ID starting with 1f47654af340cae4c4487da5b2d39a7ece41b264bd8e9852e3a2b0208fbbd717 not found: ID does not exist" containerID="1f47654af340cae4c4487da5b2d39a7ece41b264bd8e9852e3a2b0208fbbd717" Nov 26 14:35:27 crc kubenswrapper[5037]: I1126 14:35:27.696960 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f47654af340cae4c4487da5b2d39a7ece41b264bd8e9852e3a2b0208fbbd717"} err="failed to get container status \"1f47654af340cae4c4487da5b2d39a7ece41b264bd8e9852e3a2b0208fbbd717\": rpc error: code = NotFound desc = could not find container \"1f47654af340cae4c4487da5b2d39a7ece41b264bd8e9852e3a2b0208fbbd717\": container with ID starting with 1f47654af340cae4c4487da5b2d39a7ece41b264bd8e9852e3a2b0208fbbd717 not found: ID does not exist" Nov 26 14:35:27 crc kubenswrapper[5037]: I1126 14:35:27.696978 5037 scope.go:117] "RemoveContainer" containerID="84ffdcda98a3daeadc43696bd71213042440601a70d7e08d4227181371fa90a9" Nov 26 14:35:27 crc kubenswrapper[5037]: E1126 14:35:27.697213 5037 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"84ffdcda98a3daeadc43696bd71213042440601a70d7e08d4227181371fa90a9\": container with ID starting with 84ffdcda98a3daeadc43696bd71213042440601a70d7e08d4227181371fa90a9 not found: ID does not exist" containerID="84ffdcda98a3daeadc43696bd71213042440601a70d7e08d4227181371fa90a9" Nov 26 14:35:27 crc kubenswrapper[5037]: I1126 14:35:27.697249 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84ffdcda98a3daeadc43696bd71213042440601a70d7e08d4227181371fa90a9"} err="failed to get container status \"84ffdcda98a3daeadc43696bd71213042440601a70d7e08d4227181371fa90a9\": rpc error: code = NotFound desc = could not find container \"84ffdcda98a3daeadc43696bd71213042440601a70d7e08d4227181371fa90a9\": container with ID starting with 84ffdcda98a3daeadc43696bd71213042440601a70d7e08d4227181371fa90a9 not found: ID does not exist" Nov 26 14:35:27 crc kubenswrapper[5037]: I1126 14:35:27.785112 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-dgkx5"] Nov 26 14:35:28 crc kubenswrapper[5037]: I1126 14:35:28.461609 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-79fjl" Nov 26 14:35:28 crc kubenswrapper[5037]: I1126 14:35:28.461645 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-79fjl" Nov 26 14:35:28 crc kubenswrapper[5037]: I1126 14:35:28.504369 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-dvzww" event={"ID":"2696cbc1-003d-4c5b-9346-2f5434393ed7","Type":"ContainerStarted","Data":"7d50369c13b8b6c231aa2453dfedcdb4228925049b77701cf8659a1f4979d894"} Nov 26 14:35:28 crc kubenswrapper[5037]: I1126 14:35:28.507113 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-dgkx5" event={"ID":"5301fea1-84e2-4c3c-abdc-dc6464184277","Type":"ContainerStarted","Data":"bc7831ce991ad4069e1b11545c9532e61e8ef3fbdfc32e7b1653bee5273f32e8"} Nov 26 14:35:28 crc kubenswrapper[5037]: I1126 14:35:28.507138 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-dgkx5" event={"ID":"5301fea1-84e2-4c3c-abdc-dc6464184277","Type":"ContainerStarted","Data":"bac621c3430323aa2b8b665383633056d0b78b474f1876cc3b1713462d2dc461"} Nov 26 14:35:28 crc kubenswrapper[5037]: I1126 14:35:28.509503 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-4j52c" event={"ID":"fcdd281f-2196-4aa1-992a-5b275246be42","Type":"ContainerStarted","Data":"e4aae0dee8468e697d8ae0663d219492d2d9f7dc8c9fadf7966ec9c6a4427434"} Nov 26 14:35:28 crc kubenswrapper[5037]: I1126 14:35:28.509618 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-4j52c" Nov 26 14:35:28 crc kubenswrapper[5037]: I1126 14:35:28.511895 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-79fjl" Nov 26 14:35:28 crc kubenswrapper[5037]: I1126 14:35:28.528177 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-dvzww" podStartSLOduration=2.590649747 podStartE2EDuration="11.528150625s" podCreationTimestamp="2025-11-26 14:35:17 +0000 UTC" firstStartedPulling="2025-11-26 14:35:18.794777537 +0000 UTC m=+1185.591547721" 
lastFinishedPulling="2025-11-26 14:35:27.732278415 +0000 UTC m=+1194.529048599" observedRunningTime="2025-11-26 14:35:28.525351237 +0000 UTC m=+1195.322121431" watchObservedRunningTime="2025-11-26 14:35:28.528150625 +0000 UTC m=+1195.324920819" Nov 26 14:35:28 crc kubenswrapper[5037]: I1126 14:35:28.599157 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-4j52c" podStartSLOduration=2.26880155 podStartE2EDuration="9.599131129s" podCreationTimestamp="2025-11-26 14:35:19 +0000 UTC" firstStartedPulling="2025-11-26 14:35:20.425986323 +0000 UTC m=+1187.222756507" lastFinishedPulling="2025-11-26 14:35:27.756315902 +0000 UTC m=+1194.553086086" observedRunningTime="2025-11-26 14:35:28.58481478 +0000 UTC m=+1195.381584994" watchObservedRunningTime="2025-11-26 14:35:28.599131129 +0000 UTC m=+1195.395901313" Nov 26 14:35:28 crc kubenswrapper[5037]: I1126 14:35:28.603374 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-79fjl" Nov 26 14:35:28 crc kubenswrapper[5037]: I1126 14:35:28.605563 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-dgkx5" podStartSLOduration=3.605543786 podStartE2EDuration="3.605543786s" podCreationTimestamp="2025-11-26 14:35:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:35:28.603554837 +0000 UTC m=+1195.400325021" watchObservedRunningTime="2025-11-26 14:35:28.605543786 +0000 UTC m=+1195.402313970" Nov 26 14:35:30 crc kubenswrapper[5037]: I1126 14:35:30.932441 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-79fjl"] Nov 26 14:35:30 crc kubenswrapper[5037]: I1126 14:35:30.933035 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-79fjl" podUID="5acef117-b98a-4445-aef3-eeeaf895e664" containerName="registry-server" containerID="cri-o://9a08861762b5e31ded23ea9caf6a5d8728b332327147c1f4622894304dea7a4f" gracePeriod=2 Nov 26 14:35:31 crc kubenswrapper[5037]: I1126 14:35:31.531497 5037 generic.go:334] "Generic (PLEG): container finished" podID="5acef117-b98a-4445-aef3-eeeaf895e664" containerID="9a08861762b5e31ded23ea9caf6a5d8728b332327147c1f4622894304dea7a4f" exitCode=0 Nov 26 14:35:31 crc kubenswrapper[5037]: I1126 14:35:31.531526 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-79fjl" event={"ID":"5acef117-b98a-4445-aef3-eeeaf895e664","Type":"ContainerDied","Data":"9a08861762b5e31ded23ea9caf6a5d8728b332327147c1f4622894304dea7a4f"} Nov 26 14:35:31 crc kubenswrapper[5037]: I1126 14:35:31.812342 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-79fjl" Nov 26 14:35:31 crc kubenswrapper[5037]: I1126 14:35:31.857999 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5acef117-b98a-4445-aef3-eeeaf895e664-catalog-content\") pod \"5acef117-b98a-4445-aef3-eeeaf895e664\" (UID: \"5acef117-b98a-4445-aef3-eeeaf895e664\") " Nov 26 14:35:31 crc kubenswrapper[5037]: I1126 14:35:31.858042 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5acef117-b98a-4445-aef3-eeeaf895e664-utilities\") pod \"5acef117-b98a-4445-aef3-eeeaf895e664\" (UID: \"5acef117-b98a-4445-aef3-eeeaf895e664\") " Nov 26 14:35:31 crc kubenswrapper[5037]: I1126 14:35:31.858076 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4b6m\" (UniqueName: \"kubernetes.io/projected/5acef117-b98a-4445-aef3-eeeaf895e664-kube-api-access-h4b6m\") pod \"5acef117-b98a-4445-aef3-eeeaf895e664\" (UID: \"5acef117-b98a-4445-aef3-eeeaf895e664\") " Nov 26 14:35:31 crc kubenswrapper[5037]: I1126 14:35:31.859328 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5acef117-b98a-4445-aef3-eeeaf895e664-utilities" (OuterVolumeSpecName: "utilities") pod "5acef117-b98a-4445-aef3-eeeaf895e664" (UID: "5acef117-b98a-4445-aef3-eeeaf895e664"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:35:31 crc kubenswrapper[5037]: I1126 14:35:31.866784 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5acef117-b98a-4445-aef3-eeeaf895e664-kube-api-access-h4b6m" (OuterVolumeSpecName: "kube-api-access-h4b6m") pod "5acef117-b98a-4445-aef3-eeeaf895e664" (UID: "5acef117-b98a-4445-aef3-eeeaf895e664"). InnerVolumeSpecName "kube-api-access-h4b6m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:35:31 crc kubenswrapper[5037]: I1126 14:35:31.874998 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5acef117-b98a-4445-aef3-eeeaf895e664-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5acef117-b98a-4445-aef3-eeeaf895e664" (UID: "5acef117-b98a-4445-aef3-eeeaf895e664"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:35:31 crc kubenswrapper[5037]: I1126 14:35:31.960301 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5acef117-b98a-4445-aef3-eeeaf895e664-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:35:31 crc kubenswrapper[5037]: I1126 14:35:31.960343 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5acef117-b98a-4445-aef3-eeeaf895e664-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:35:31 crc kubenswrapper[5037]: I1126 14:35:31.960355 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4b6m\" (UniqueName: \"kubernetes.io/projected/5acef117-b98a-4445-aef3-eeeaf895e664-kube-api-access-h4b6m\") on node \"crc\" DevicePath \"\"" Nov 26 14:35:32 crc kubenswrapper[5037]: I1126 14:35:32.542189 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-79fjl" event={"ID":"5acef117-b98a-4445-aef3-eeeaf895e664","Type":"ContainerDied","Data":"79fcbe30754226e99ad1ae720ad8213457d9ad295f66553904bbdd949d604a06"} Nov 26 14:35:32 crc kubenswrapper[5037]: I1126 14:35:32.542558 5037 scope.go:117] "RemoveContainer" containerID="9a08861762b5e31ded23ea9caf6a5d8728b332327147c1f4622894304dea7a4f" Nov 26 14:35:32 crc kubenswrapper[5037]: I1126 14:35:32.542307 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-79fjl" Nov 26 14:35:32 crc kubenswrapper[5037]: I1126 14:35:32.568268 5037 scope.go:117] "RemoveContainer" containerID="b510ddff5c44e97ea8e9e166cc9cd9bc1736e4c702a5128b1c907df5d351db35" Nov 26 14:35:32 crc kubenswrapper[5037]: I1126 14:35:32.572375 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-79fjl"] Nov 26 14:35:32 crc kubenswrapper[5037]: I1126 14:35:32.577970 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-79fjl"] Nov 26 14:35:32 crc kubenswrapper[5037]: I1126 14:35:32.585820 5037 scope.go:117] "RemoveContainer" containerID="5b2b71c019cdc9b83908dc4263f71d737a6e47f3080249fed9cfe9866b125717" Nov 26 14:35:33 crc kubenswrapper[5037]: I1126 14:35:33.918925 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5acef117-b98a-4445-aef3-eeeaf895e664" path="/var/lib/kubelet/pods/5acef117-b98a-4445-aef3-eeeaf895e664/volumes" Nov 26 14:35:34 crc kubenswrapper[5037]: I1126 14:35:34.914810 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-4j52c" Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.355496 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-vkjsk"] Nov 26 14:35:42 crc kubenswrapper[5037]: E1126 14:35:42.356937 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5acef117-b98a-4445-aef3-eeeaf895e664" containerName="registry-server" Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.356975 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="5acef117-b98a-4445-aef3-eeeaf895e664" containerName="registry-server" Nov 26 14:35:42 crc kubenswrapper[5037]: E1126 14:35:42.357014 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5acef117-b98a-4445-aef3-eeeaf895e664" containerName="extract-utilities" Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.357032 5037 
state_mem.go:107] "Deleted CPUSet assignment" podUID="5acef117-b98a-4445-aef3-eeeaf895e664" containerName="extract-utilities" Nov 26 14:35:42 crc kubenswrapper[5037]: E1126 14:35:42.357052 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5acef117-b98a-4445-aef3-eeeaf895e664" containerName="extract-content" Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.357070 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="5acef117-b98a-4445-aef3-eeeaf895e664" containerName="extract-content" Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.357388 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="5acef117-b98a-4445-aef3-eeeaf895e664" containerName="registry-server" Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.358243 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vkjsk" Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.367742 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.368163 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.370583 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-77lf2" Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.378015 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-vkjsk"] Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.425475 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fw22d\" (UniqueName: \"kubernetes.io/projected/0cc7429f-e604-4373-9ef6-3baeee4b9519-kube-api-access-fw22d\") pod \"openstack-operator-index-vkjsk\" (UID: \"0cc7429f-e604-4373-9ef6-3baeee4b9519\") " pod="openstack-operators/openstack-operator-index-vkjsk" Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.526981 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fw22d\" (UniqueName: \"kubernetes.io/projected/0cc7429f-e604-4373-9ef6-3baeee4b9519-kube-api-access-fw22d\") pod \"openstack-operator-index-vkjsk\" (UID: \"0cc7429f-e604-4373-9ef6-3baeee4b9519\") " pod="openstack-operators/openstack-operator-index-vkjsk" Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.555774 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fw22d\" (UniqueName: \"kubernetes.io/projected/0cc7429f-e604-4373-9ef6-3baeee4b9519-kube-api-access-fw22d\") pod \"openstack-operator-index-vkjsk\" (UID: \"0cc7429f-e604-4373-9ef6-3baeee4b9519\") " pod="openstack-operators/openstack-operator-index-vkjsk" Nov 26 14:35:42 crc kubenswrapper[5037]: I1126 14:35:42.692431 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-vkjsk" Nov 26 14:35:43 crc kubenswrapper[5037]: I1126 14:35:43.124216 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-vkjsk"] Nov 26 14:35:43 crc kubenswrapper[5037]: W1126 14:35:43.130690 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0cc7429f_e604_4373_9ef6_3baeee4b9519.slice/crio-ea23b71eda9bb6ead603d6070bfb2a1f99234dc5d57c4fbfc11075454363355d WatchSource:0}: Error finding container ea23b71eda9bb6ead603d6070bfb2a1f99234dc5d57c4fbfc11075454363355d: Status 404 returned error can't find the container with id ea23b71eda9bb6ead603d6070bfb2a1f99234dc5d57c4fbfc11075454363355d Nov 26 14:35:43 crc kubenswrapper[5037]: I1126 14:35:43.632957 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vkjsk" event={"ID":"0cc7429f-e604-4373-9ef6-3baeee4b9519","Type":"ContainerStarted","Data":"ea23b71eda9bb6ead603d6070bfb2a1f99234dc5d57c4fbfc11075454363355d"} Nov 26 14:35:46 crc kubenswrapper[5037]: I1126 14:35:46.650948 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vkjsk" event={"ID":"0cc7429f-e604-4373-9ef6-3baeee4b9519","Type":"ContainerStarted","Data":"25a36d0cfdebdeabf11de93e7c67789541a00d1d818936f5d8494e1a1a3862d5"} Nov 26 14:35:46 crc kubenswrapper[5037]: I1126 14:35:46.666320 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-vkjsk" podStartSLOduration=2.02378161 podStartE2EDuration="4.666270468s" podCreationTimestamp="2025-11-26 14:35:42 +0000 UTC" firstStartedPulling="2025-11-26 14:35:43.133884352 +0000 UTC m=+1209.930654566" lastFinishedPulling="2025-11-26 14:35:45.77637324 +0000 UTC m=+1212.573143424" observedRunningTime="2025-11-26 14:35:46.663725566 +0000 UTC m=+1213.460495800" watchObservedRunningTime="2025-11-26 14:35:46.666270468 +0000 UTC m=+1213.463040662" Nov 26 14:35:47 crc kubenswrapper[5037]: I1126 14:35:47.746783 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-vkjsk"] Nov 26 14:35:48 crc kubenswrapper[5037]: I1126 14:35:48.342469 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-zc8jb"] Nov 26 14:35:48 crc kubenswrapper[5037]: I1126 14:35:48.343435 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-zc8jb" Nov 26 14:35:48 crc kubenswrapper[5037]: I1126 14:35:48.352511 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-zc8jb"] Nov 26 14:35:48 crc kubenswrapper[5037]: I1126 14:35:48.421879 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5db7\" (UniqueName: \"kubernetes.io/projected/3c07b8c9-2517-4830-9455-208774f73353-kube-api-access-m5db7\") pod \"openstack-operator-index-zc8jb\" (UID: \"3c07b8c9-2517-4830-9455-208774f73353\") " pod="openstack-operators/openstack-operator-index-zc8jb" Nov 26 14:35:48 crc kubenswrapper[5037]: I1126 14:35:48.523951 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5db7\" (UniqueName: \"kubernetes.io/projected/3c07b8c9-2517-4830-9455-208774f73353-kube-api-access-m5db7\") pod \"openstack-operator-index-zc8jb\" (UID: \"3c07b8c9-2517-4830-9455-208774f73353\") " pod="openstack-operators/openstack-operator-index-zc8jb" Nov 26 14:35:48 crc kubenswrapper[5037]: I1126 14:35:48.553097 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5db7\" (UniqueName: \"kubernetes.io/projected/3c07b8c9-2517-4830-9455-208774f73353-kube-api-access-m5db7\") pod \"openstack-operator-index-zc8jb\" (UID: \"3c07b8c9-2517-4830-9455-208774f73353\") " pod="openstack-operators/openstack-operator-index-zc8jb" Nov 26 14:35:48 crc kubenswrapper[5037]: I1126 14:35:48.659514 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-zc8jb" Nov 26 14:35:48 crc kubenswrapper[5037]: I1126 14:35:48.668872 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-vkjsk" podUID="0cc7429f-e604-4373-9ef6-3baeee4b9519" containerName="registry-server" containerID="cri-o://25a36d0cfdebdeabf11de93e7c67789541a00d1d818936f5d8494e1a1a3862d5" gracePeriod=2 Nov 26 14:35:48 crc kubenswrapper[5037]: I1126 14:35:48.902231 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-zc8jb"] Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.009192 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vkjsk" Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.133512 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fw22d\" (UniqueName: \"kubernetes.io/projected/0cc7429f-e604-4373-9ef6-3baeee4b9519-kube-api-access-fw22d\") pod \"0cc7429f-e604-4373-9ef6-3baeee4b9519\" (UID: \"0cc7429f-e604-4373-9ef6-3baeee4b9519\") " Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.150568 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cc7429f-e604-4373-9ef6-3baeee4b9519-kube-api-access-fw22d" (OuterVolumeSpecName: "kube-api-access-fw22d") pod "0cc7429f-e604-4373-9ef6-3baeee4b9519" (UID: "0cc7429f-e604-4373-9ef6-3baeee4b9519"). InnerVolumeSpecName "kube-api-access-fw22d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.235378 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fw22d\" (UniqueName: \"kubernetes.io/projected/0cc7429f-e604-4373-9ef6-3baeee4b9519-kube-api-access-fw22d\") on node \"crc\" DevicePath \"\"" Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.676339 5037 generic.go:334] "Generic (PLEG): container finished" podID="0cc7429f-e604-4373-9ef6-3baeee4b9519" containerID="25a36d0cfdebdeabf11de93e7c67789541a00d1d818936f5d8494e1a1a3862d5" exitCode=0 Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.676420 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vkjsk" event={"ID":"0cc7429f-e604-4373-9ef6-3baeee4b9519","Type":"ContainerDied","Data":"25a36d0cfdebdeabf11de93e7c67789541a00d1d818936f5d8494e1a1a3862d5"} Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.676423 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vkjsk" Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.676450 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vkjsk" event={"ID":"0cc7429f-e604-4373-9ef6-3baeee4b9519","Type":"ContainerDied","Data":"ea23b71eda9bb6ead603d6070bfb2a1f99234dc5d57c4fbfc11075454363355d"} Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.676472 5037 scope.go:117] "RemoveContainer" containerID="25a36d0cfdebdeabf11de93e7c67789541a00d1d818936f5d8494e1a1a3862d5" Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.679054 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-zc8jb" event={"ID":"3c07b8c9-2517-4830-9455-208774f73353","Type":"ContainerStarted","Data":"08faea4e3cd302ca251f9f302119f99acb7521cd17833b6541472a83d0b87010"} Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.679096 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-zc8jb" event={"ID":"3c07b8c9-2517-4830-9455-208774f73353","Type":"ContainerStarted","Data":"e3f311e4c1d306a2f7b27a66895f39540f975ae6f5bb05e3845b3172ad47620f"} Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.698855 5037 scope.go:117] "RemoveContainer" containerID="25a36d0cfdebdeabf11de93e7c67789541a00d1d818936f5d8494e1a1a3862d5" Nov 26 14:35:49 crc kubenswrapper[5037]: E1126 14:35:49.699918 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25a36d0cfdebdeabf11de93e7c67789541a00d1d818936f5d8494e1a1a3862d5\": container with ID starting with 25a36d0cfdebdeabf11de93e7c67789541a00d1d818936f5d8494e1a1a3862d5 not found: ID does not exist" containerID="25a36d0cfdebdeabf11de93e7c67789541a00d1d818936f5d8494e1a1a3862d5" Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.699996 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25a36d0cfdebdeabf11de93e7c67789541a00d1d818936f5d8494e1a1a3862d5"} err="failed to get container status \"25a36d0cfdebdeabf11de93e7c67789541a00d1d818936f5d8494e1a1a3862d5\": rpc error: code = NotFound desc = could not find container \"25a36d0cfdebdeabf11de93e7c67789541a00d1d818936f5d8494e1a1a3862d5\": container with ID starting with 25a36d0cfdebdeabf11de93e7c67789541a00d1d818936f5d8494e1a1a3862d5 not found: ID does not exist" Nov 26 14:35:49 crc kubenswrapper[5037]: 
I1126 14:35:49.702741 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-zc8jb" podStartSLOduration=1.258126279 podStartE2EDuration="1.702718469s" podCreationTimestamp="2025-11-26 14:35:48 +0000 UTC" firstStartedPulling="2025-11-26 14:35:48.909666518 +0000 UTC m=+1215.706436702" lastFinishedPulling="2025-11-26 14:35:49.354258698 +0000 UTC m=+1216.151028892" observedRunningTime="2025-11-26 14:35:49.696717843 +0000 UTC m=+1216.493488067" watchObservedRunningTime="2025-11-26 14:35:49.702718469 +0000 UTC m=+1216.499488663" Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.727893 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-vkjsk"] Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.740743 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-vkjsk"] Nov 26 14:35:49 crc kubenswrapper[5037]: I1126 14:35:49.922151 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cc7429f-e604-4373-9ef6-3baeee4b9519" path="/var/lib/kubelet/pods/0cc7429f-e604-4373-9ef6-3baeee4b9519/volumes" Nov 26 14:35:58 crc kubenswrapper[5037]: I1126 14:35:58.660041 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-zc8jb" Nov 26 14:35:58 crc kubenswrapper[5037]: I1126 14:35:58.660484 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-zc8jb" Nov 26 14:35:58 crc kubenswrapper[5037]: I1126 14:35:58.710492 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-zc8jb" Nov 26 14:35:58 crc kubenswrapper[5037]: I1126 14:35:58.772219 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-zc8jb" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.184564 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26"] Nov 26 14:36:00 crc kubenswrapper[5037]: E1126 14:36:00.185222 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cc7429f-e604-4373-9ef6-3baeee4b9519" containerName="registry-server" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.185238 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cc7429f-e604-4373-9ef6-3baeee4b9519" containerName="registry-server" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.185360 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cc7429f-e604-4373-9ef6-3baeee4b9519" containerName="registry-server" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.186272 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.188124 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-nkc5v" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.212260 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26"] Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.310524 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvhzc\" (UniqueName: \"kubernetes.io/projected/d69cc4af-4483-4634-a1f1-b15253c7d42c-kube-api-access-pvhzc\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26\" (UID: \"d69cc4af-4483-4634-a1f1-b15253c7d42c\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.310594 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d69cc4af-4483-4634-a1f1-b15253c7d42c-util\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26\" (UID: \"d69cc4af-4483-4634-a1f1-b15253c7d42c\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.310685 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d69cc4af-4483-4634-a1f1-b15253c7d42c-bundle\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26\" (UID: \"d69cc4af-4483-4634-a1f1-b15253c7d42c\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.412358 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvhzc\" (UniqueName: \"kubernetes.io/projected/d69cc4af-4483-4634-a1f1-b15253c7d42c-kube-api-access-pvhzc\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26\" (UID: \"d69cc4af-4483-4634-a1f1-b15253c7d42c\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.412411 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d69cc4af-4483-4634-a1f1-b15253c7d42c-util\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26\" (UID: \"d69cc4af-4483-4634-a1f1-b15253c7d42c\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.412465 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d69cc4af-4483-4634-a1f1-b15253c7d42c-bundle\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26\" (UID: \"d69cc4af-4483-4634-a1f1-b15253c7d42c\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.412908 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/d69cc4af-4483-4634-a1f1-b15253c7d42c-bundle\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26\" (UID: \"d69cc4af-4483-4634-a1f1-b15253c7d42c\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.412936 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d69cc4af-4483-4634-a1f1-b15253c7d42c-util\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26\" (UID: \"d69cc4af-4483-4634-a1f1-b15253c7d42c\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.437497 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvhzc\" (UniqueName: \"kubernetes.io/projected/d69cc4af-4483-4634-a1f1-b15253c7d42c-kube-api-access-pvhzc\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26\" (UID: \"d69cc4af-4483-4634-a1f1-b15253c7d42c\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.511277 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" Nov 26 14:36:00 crc kubenswrapper[5037]: I1126 14:36:00.951655 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26"] Nov 26 14:36:01 crc kubenswrapper[5037]: I1126 14:36:01.772854 5037 generic.go:334] "Generic (PLEG): container finished" podID="d69cc4af-4483-4634-a1f1-b15253c7d42c" containerID="1b3cb4892d2d6d5fa1c7bb1a7a15196de591ae01a90ebf61df09f8b3fdb04dfd" exitCode=0 Nov 26 14:36:01 crc kubenswrapper[5037]: I1126 14:36:01.772982 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" event={"ID":"d69cc4af-4483-4634-a1f1-b15253c7d42c","Type":"ContainerDied","Data":"1b3cb4892d2d6d5fa1c7bb1a7a15196de591ae01a90ebf61df09f8b3fdb04dfd"} Nov 26 14:36:01 crc kubenswrapper[5037]: I1126 14:36:01.773389 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" event={"ID":"d69cc4af-4483-4634-a1f1-b15253c7d42c","Type":"ContainerStarted","Data":"42b2cb7788b88c43a8413a50c0fd437784b408a94485d0eaf5e4ff05b0c9aeac"} Nov 26 14:36:02 crc kubenswrapper[5037]: I1126 14:36:02.787861 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" event={"ID":"d69cc4af-4483-4634-a1f1-b15253c7d42c","Type":"ContainerStarted","Data":"ec4bf63ea811090656de1e8fcb8fa42b5503bb90556f706895111669aaa2bd41"} Nov 26 14:36:03 crc kubenswrapper[5037]: I1126 14:36:03.797882 5037 generic.go:334] "Generic (PLEG): container finished" podID="d69cc4af-4483-4634-a1f1-b15253c7d42c" containerID="ec4bf63ea811090656de1e8fcb8fa42b5503bb90556f706895111669aaa2bd41" exitCode=0 Nov 26 14:36:03 crc kubenswrapper[5037]: I1126 14:36:03.797939 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" 
event={"ID":"d69cc4af-4483-4634-a1f1-b15253c7d42c","Type":"ContainerDied","Data":"ec4bf63ea811090656de1e8fcb8fa42b5503bb90556f706895111669aaa2bd41"} Nov 26 14:36:04 crc kubenswrapper[5037]: I1126 14:36:04.807859 5037 generic.go:334] "Generic (PLEG): container finished" podID="d69cc4af-4483-4634-a1f1-b15253c7d42c" containerID="4811aa41593d2e91857ac30f9fd8aad21082912456f0a09aac0cc919cf197aa6" exitCode=0 Nov 26 14:36:04 crc kubenswrapper[5037]: I1126 14:36:04.807925 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" event={"ID":"d69cc4af-4483-4634-a1f1-b15253c7d42c","Type":"ContainerDied","Data":"4811aa41593d2e91857ac30f9fd8aad21082912456f0a09aac0cc919cf197aa6"} Nov 26 14:36:06 crc kubenswrapper[5037]: I1126 14:36:06.163562 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" Nov 26 14:36:06 crc kubenswrapper[5037]: I1126 14:36:06.300895 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d69cc4af-4483-4634-a1f1-b15253c7d42c-util\") pod \"d69cc4af-4483-4634-a1f1-b15253c7d42c\" (UID: \"d69cc4af-4483-4634-a1f1-b15253c7d42c\") " Nov 26 14:36:06 crc kubenswrapper[5037]: I1126 14:36:06.300972 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvhzc\" (UniqueName: \"kubernetes.io/projected/d69cc4af-4483-4634-a1f1-b15253c7d42c-kube-api-access-pvhzc\") pod \"d69cc4af-4483-4634-a1f1-b15253c7d42c\" (UID: \"d69cc4af-4483-4634-a1f1-b15253c7d42c\") " Nov 26 14:36:06 crc kubenswrapper[5037]: I1126 14:36:06.301082 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d69cc4af-4483-4634-a1f1-b15253c7d42c-bundle\") pod \"d69cc4af-4483-4634-a1f1-b15253c7d42c\" (UID: \"d69cc4af-4483-4634-a1f1-b15253c7d42c\") " Nov 26 14:36:06 crc kubenswrapper[5037]: I1126 14:36:06.302254 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d69cc4af-4483-4634-a1f1-b15253c7d42c-bundle" (OuterVolumeSpecName: "bundle") pod "d69cc4af-4483-4634-a1f1-b15253c7d42c" (UID: "d69cc4af-4483-4634-a1f1-b15253c7d42c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:36:06 crc kubenswrapper[5037]: I1126 14:36:06.302801 5037 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d69cc4af-4483-4634-a1f1-b15253c7d42c-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:36:06 crc kubenswrapper[5037]: I1126 14:36:06.307046 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d69cc4af-4483-4634-a1f1-b15253c7d42c-kube-api-access-pvhzc" (OuterVolumeSpecName: "kube-api-access-pvhzc") pod "d69cc4af-4483-4634-a1f1-b15253c7d42c" (UID: "d69cc4af-4483-4634-a1f1-b15253c7d42c"). InnerVolumeSpecName "kube-api-access-pvhzc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:36:06 crc kubenswrapper[5037]: I1126 14:36:06.332800 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d69cc4af-4483-4634-a1f1-b15253c7d42c-util" (OuterVolumeSpecName: "util") pod "d69cc4af-4483-4634-a1f1-b15253c7d42c" (UID: "d69cc4af-4483-4634-a1f1-b15253c7d42c"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:36:06 crc kubenswrapper[5037]: I1126 14:36:06.404267 5037 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d69cc4af-4483-4634-a1f1-b15253c7d42c-util\") on node \"crc\" DevicePath \"\"" Nov 26 14:36:06 crc kubenswrapper[5037]: I1126 14:36:06.404394 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvhzc\" (UniqueName: \"kubernetes.io/projected/d69cc4af-4483-4634-a1f1-b15253c7d42c-kube-api-access-pvhzc\") on node \"crc\" DevicePath \"\"" Nov 26 14:36:06 crc kubenswrapper[5037]: I1126 14:36:06.828232 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" event={"ID":"d69cc4af-4483-4634-a1f1-b15253c7d42c","Type":"ContainerDied","Data":"42b2cb7788b88c43a8413a50c0fd437784b408a94485d0eaf5e4ff05b0c9aeac"} Nov 26 14:36:06 crc kubenswrapper[5037]: I1126 14:36:06.828274 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42b2cb7788b88c43a8413a50c0fd437784b408a94485d0eaf5e4ff05b0c9aeac" Nov 26 14:36:06 crc kubenswrapper[5037]: I1126 14:36:06.828325 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26" Nov 26 14:36:11 crc kubenswrapper[5037]: I1126 14:36:11.247866 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:36:11 crc kubenswrapper[5037]: I1126 14:36:11.248442 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:36:12 crc kubenswrapper[5037]: I1126 14:36:12.653512 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-544fb75865-wjh78"] Nov 26 14:36:12 crc kubenswrapper[5037]: E1126 14:36:12.653744 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d69cc4af-4483-4634-a1f1-b15253c7d42c" containerName="extract" Nov 26 14:36:12 crc kubenswrapper[5037]: I1126 14:36:12.653755 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="d69cc4af-4483-4634-a1f1-b15253c7d42c" containerName="extract" Nov 26 14:36:12 crc kubenswrapper[5037]: E1126 14:36:12.653772 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d69cc4af-4483-4634-a1f1-b15253c7d42c" containerName="util" Nov 26 14:36:12 crc kubenswrapper[5037]: I1126 14:36:12.653777 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="d69cc4af-4483-4634-a1f1-b15253c7d42c" containerName="util" Nov 26 14:36:12 crc kubenswrapper[5037]: E1126 14:36:12.653790 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d69cc4af-4483-4634-a1f1-b15253c7d42c" containerName="pull" Nov 26 14:36:12 crc kubenswrapper[5037]: I1126 14:36:12.653798 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="d69cc4af-4483-4634-a1f1-b15253c7d42c" containerName="pull" Nov 26 14:36:12 crc kubenswrapper[5037]: I1126 14:36:12.653902 5037 
Nov 26 14:36:12 crc kubenswrapper[5037]: I1126 14:36:12.653902 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="d69cc4af-4483-4634-a1f1-b15253c7d42c" containerName="extract"
Nov 26 14:36:12 crc kubenswrapper[5037]: I1126 14:36:12.654279 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-wjh78"
Nov 26 14:36:12 crc kubenswrapper[5037]: I1126 14:36:12.661401 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-wnbxr"
Nov 26 14:36:12 crc kubenswrapper[5037]: I1126 14:36:12.684792 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-544fb75865-wjh78"]
Nov 26 14:36:12 crc kubenswrapper[5037]: I1126 14:36:12.795883 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v87ln\" (UniqueName: \"kubernetes.io/projected/ad997d1c-74cb-4e4b-bde6-6b5eefeb9332-kube-api-access-v87ln\") pod \"openstack-operator-controller-operator-544fb75865-wjh78\" (UID: \"ad997d1c-74cb-4e4b-bde6-6b5eefeb9332\") " pod="openstack-operators/openstack-operator-controller-operator-544fb75865-wjh78"
Nov 26 14:36:12 crc kubenswrapper[5037]: I1126 14:36:12.897535 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v87ln\" (UniqueName: \"kubernetes.io/projected/ad997d1c-74cb-4e4b-bde6-6b5eefeb9332-kube-api-access-v87ln\") pod \"openstack-operator-controller-operator-544fb75865-wjh78\" (UID: \"ad997d1c-74cb-4e4b-bde6-6b5eefeb9332\") " pod="openstack-operators/openstack-operator-controller-operator-544fb75865-wjh78"
Nov 26 14:36:12 crc kubenswrapper[5037]: I1126 14:36:12.916695 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v87ln\" (UniqueName: \"kubernetes.io/projected/ad997d1c-74cb-4e4b-bde6-6b5eefeb9332-kube-api-access-v87ln\") pod \"openstack-operator-controller-operator-544fb75865-wjh78\" (UID: \"ad997d1c-74cb-4e4b-bde6-6b5eefeb9332\") " pod="openstack-operators/openstack-operator-controller-operator-544fb75865-wjh78"
Nov 26 14:36:12 crc kubenswrapper[5037]: I1126 14:36:12.973598 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-wjh78"
Nov 26 14:36:13 crc kubenswrapper[5037]: I1126 14:36:13.484096 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-544fb75865-wjh78"]
Nov 26 14:36:13 crc kubenswrapper[5037]: W1126 14:36:13.488382 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad997d1c_74cb_4e4b_bde6_6b5eefeb9332.slice/crio-3cfd59f6f36df6542249b5b71a107ecc127b17dddf376d79845f77e7ae8cf914 WatchSource:0}: Error finding container 3cfd59f6f36df6542249b5b71a107ecc127b17dddf376d79845f77e7ae8cf914: Status 404 returned error can't find the container with id 3cfd59f6f36df6542249b5b71a107ecc127b17dddf376d79845f77e7ae8cf914
Nov 26 14:36:13 crc kubenswrapper[5037]: I1126 14:36:13.875983 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-wjh78" event={"ID":"ad997d1c-74cb-4e4b-bde6-6b5eefeb9332","Type":"ContainerStarted","Data":"3cfd59f6f36df6542249b5b71a107ecc127b17dddf376d79845f77e7ae8cf914"}
Nov 26 14:36:18 crc kubenswrapper[5037]: I1126 14:36:18.912673 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-wjh78" event={"ID":"ad997d1c-74cb-4e4b-bde6-6b5eefeb9332","Type":"ContainerStarted","Data":"7abe4068356cba30a110156d46390de021083968655692e48b695fade3aa9c58"}
Nov 26 14:36:18 crc kubenswrapper[5037]: I1126 14:36:18.913337 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-wjh78"
Nov 26 14:36:18 crc kubenswrapper[5037]: I1126 14:36:18.954163 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-wjh78" podStartSLOduration=2.566940239 podStartE2EDuration="6.954143057s" podCreationTimestamp="2025-11-26 14:36:12 +0000 UTC" firstStartedPulling="2025-11-26 14:36:13.490968387 +0000 UTC m=+1240.287738571" lastFinishedPulling="2025-11-26 14:36:17.878171205 +0000 UTC m=+1244.674941389" observedRunningTime="2025-11-26 14:36:18.947495235 +0000 UTC m=+1245.744265469" watchObservedRunningTime="2025-11-26 14:36:18.954143057 +0000 UTC m=+1245.750913251"
Nov 26 14:36:32 crc kubenswrapper[5037]: I1126 14:36:32.978188 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-wjh78"
Nov 26 14:36:41 crc kubenswrapper[5037]: I1126 14:36:41.246891 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:36:41 crc kubenswrapper[5037]: I1126 14:36:41.247585 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
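The "Observed pod startup duration" entry above is internally consistent, and the two durations it reports relate by simple time subtraction: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (14:36:18.954143057 - 14:36:12 = 6.954143057s), and podStartSLOduration excludes the image-pull window (lastFinishedPulling - firstStartedPulling = 14:36:17.878171205 - 14:36:13.490968387 = 4.387202818s), giving 6.954143057 - 4.387202818 = 2.566940239s. In Go terms the arithmetic is plain time.Time subtraction (a sketch of the relationship, not the tracker's code):

    // e2e := watchObservedRunningTime.Sub(podCreationTimestamp) // 6.954143057s
    // pull := lastFinishedPulling.Sub(firstStartedPulling)      // 4.387202818s
    // slo := e2e - pull                                         // 2.566940239s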
pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.160471 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.162550 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-qtprg" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.178194 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.179434 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.184731 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.185093 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-lllpd" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.191541 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-k67q7"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.192817 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-k67q7" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.195324 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-p2w75" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.215747 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-k67q7"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.227597 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.261957 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.263085 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.267631 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-jj7ll" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.277752 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.291193 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-927td"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.292855 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-927td" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.297953 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-59q28" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.303255 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.304639 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.309863 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-ggss2" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.315936 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-927td"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.327625 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bv7km\" (UniqueName: \"kubernetes.io/projected/dad0150e-fc25-4245-ad22-e940fadd107a-kube-api-access-bv7km\") pod \"barbican-operator-controller-manager-7b64f4fb85-7qg65\" (UID: \"dad0150e-fc25-4245-ad22-e940fadd107a\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.327694 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2j8q2\" (UniqueName: \"kubernetes.io/projected/4d00a1ec-3ee8-4166-b497-e96629f2e92a-kube-api-access-2j8q2\") pod \"cinder-operator-controller-manager-6b7f75547b-ndntx\" (UID: \"4d00a1ec-3ee8-4166-b497-e96629f2e92a\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.327745 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssczk\" (UniqueName: \"kubernetes.io/projected/4fbefccf-1879-4d21-a312-44f95a16545b-kube-api-access-ssczk\") pod \"designate-operator-controller-manager-955677c94-k67q7\" (UID: \"4fbefccf-1879-4d21-a312-44f95a16545b\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-k67q7" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.338540 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.374114 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.380646 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.387156 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.387428 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-c48jj" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.407850 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.411558 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.412539 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.418363 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-4d5pt" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.433049 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert\") pod \"infra-operator-controller-manager-57548d458d-vdw9h\" (UID: \"98bbf7c3-bf20-4131-8df2-55af39d6c756\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.433112 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfh5c\" (UniqueName: \"kubernetes.io/projected/aa8c9234-d8b0-4975-b4c4-83496196179f-kube-api-access-pfh5c\") pod \"heat-operator-controller-manager-5b77f656f-927td\" (UID: \"aa8c9234-d8b0-4975-b4c4-83496196179f\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-927td" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.433151 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bv7km\" (UniqueName: \"kubernetes.io/projected/dad0150e-fc25-4245-ad22-e940fadd107a-kube-api-access-bv7km\") pod \"barbican-operator-controller-manager-7b64f4fb85-7qg65\" (UID: \"dad0150e-fc25-4245-ad22-e940fadd107a\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.433198 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2j8q2\" (UniqueName: \"kubernetes.io/projected/4d00a1ec-3ee8-4166-b497-e96629f2e92a-kube-api-access-2j8q2\") pod \"cinder-operator-controller-manager-6b7f75547b-ndntx\" (UID: \"4d00a1ec-3ee8-4166-b497-e96629f2e92a\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.433249 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nh2c\" (UniqueName: \"kubernetes.io/projected/668635d7-22b8-4fa0-8762-4b3c802cf9cb-kube-api-access-2nh2c\") pod \"horizon-operator-controller-manager-5d494799bf-7szzf\" (UID: \"668635d7-22b8-4fa0-8762-4b3c802cf9cb\") " 
pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.433275 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4z2pr\" (UniqueName: \"kubernetes.io/projected/06c51319-7e28-41b4-be90-8262eb3b7307-kube-api-access-4z2pr\") pod \"glance-operator-controller-manager-589cbd6b5b-s25dn\" (UID: \"06c51319-7e28-41b4-be90-8262eb3b7307\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.433339 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssczk\" (UniqueName: \"kubernetes.io/projected/4fbefccf-1879-4d21-a312-44f95a16545b-kube-api-access-ssczk\") pod \"designate-operator-controller-manager-955677c94-k67q7\" (UID: \"4fbefccf-1879-4d21-a312-44f95a16545b\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-k67q7" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.433367 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25wjv\" (UniqueName: \"kubernetes.io/projected/2d64e096-3666-4924-b2c3-31584884abb1-kube-api-access-25wjv\") pod \"ironic-operator-controller-manager-67cb4dc6d4-8hkwd\" (UID: \"2d64e096-3666-4924-b2c3-31584884abb1\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.433396 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bd2n7\" (UniqueName: \"kubernetes.io/projected/98bbf7c3-bf20-4131-8df2-55af39d6c756-kube-api-access-bd2n7\") pod \"infra-operator-controller-manager-57548d458d-vdw9h\" (UID: \"98bbf7c3-bf20-4131-8df2-55af39d6c756\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.440342 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.451397 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.452807 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.455274 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-542xs" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.465589 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssczk\" (UniqueName: \"kubernetes.io/projected/4fbefccf-1879-4d21-a312-44f95a16545b-kube-api-access-ssczk\") pod \"designate-operator-controller-manager-955677c94-k67q7\" (UID: \"4fbefccf-1879-4d21-a312-44f95a16545b\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-k67q7" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.471229 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2j8q2\" (UniqueName: \"kubernetes.io/projected/4d00a1ec-3ee8-4166-b497-e96629f2e92a-kube-api-access-2j8q2\") pod \"cinder-operator-controller-manager-6b7f75547b-ndntx\" (UID: \"4d00a1ec-3ee8-4166-b497-e96629f2e92a\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.485015 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bv7km\" (UniqueName: \"kubernetes.io/projected/dad0150e-fc25-4245-ad22-e940fadd107a-kube-api-access-bv7km\") pod \"barbican-operator-controller-manager-7b64f4fb85-7qg65\" (UID: \"dad0150e-fc25-4245-ad22-e940fadd107a\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.496073 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.504617 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.506135 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.518175 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-k67q7" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.518874 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-px8q7" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.521370 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.545977 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nh2c\" (UniqueName: \"kubernetes.io/projected/668635d7-22b8-4fa0-8762-4b3c802cf9cb-kube-api-access-2nh2c\") pod \"horizon-operator-controller-manager-5d494799bf-7szzf\" (UID: \"668635d7-22b8-4fa0-8762-4b3c802cf9cb\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.546025 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4z2pr\" (UniqueName: \"kubernetes.io/projected/06c51319-7e28-41b4-be90-8262eb3b7307-kube-api-access-4z2pr\") pod \"glance-operator-controller-manager-589cbd6b5b-s25dn\" (UID: \"06c51319-7e28-41b4-be90-8262eb3b7307\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.546059 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25wjv\" (UniqueName: \"kubernetes.io/projected/2d64e096-3666-4924-b2c3-31584884abb1-kube-api-access-25wjv\") pod \"ironic-operator-controller-manager-67cb4dc6d4-8hkwd\" (UID: \"2d64e096-3666-4924-b2c3-31584884abb1\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.546085 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bd2n7\" (UniqueName: \"kubernetes.io/projected/98bbf7c3-bf20-4131-8df2-55af39d6c756-kube-api-access-bd2n7\") pod \"infra-operator-controller-manager-57548d458d-vdw9h\" (UID: \"98bbf7c3-bf20-4131-8df2-55af39d6c756\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.546128 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert\") pod \"infra-operator-controller-manager-57548d458d-vdw9h\" (UID: \"98bbf7c3-bf20-4131-8df2-55af39d6c756\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.546165 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfh5c\" (UniqueName: \"kubernetes.io/projected/aa8c9234-d8b0-4975-b4c4-83496196179f-kube-api-access-pfh5c\") pod \"heat-operator-controller-manager-5b77f656f-927td\" (UID: \"aa8c9234-d8b0-4975-b4c4-83496196179f\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-927td" Nov 26 14:36:59 crc kubenswrapper[5037]: E1126 14:36:59.547624 5037 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 14:36:59 crc kubenswrapper[5037]: E1126 14:36:59.547675 5037 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert podName:98bbf7c3-bf20-4131-8df2-55af39d6c756 nodeName:}" failed. No retries permitted until 2025-11-26 14:37:00.047655911 +0000 UTC m=+1286.844426095 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert") pod "infra-operator-controller-manager-57548d458d-vdw9h" (UID: "98bbf7c3-bf20-4131-8df2-55af39d6c756") : secret "infra-operator-webhook-server-cert" not found Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.567361 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.575965 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfh5c\" (UniqueName: \"kubernetes.io/projected/aa8c9234-d8b0-4975-b4c4-83496196179f-kube-api-access-pfh5c\") pod \"heat-operator-controller-manager-5b77f656f-927td\" (UID: \"aa8c9234-d8b0-4975-b4c4-83496196179f\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-927td" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.576126 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.577502 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.631242 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-bvg8p" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.668033 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bd2n7\" (UniqueName: \"kubernetes.io/projected/98bbf7c3-bf20-4131-8df2-55af39d6c756-kube-api-access-bd2n7\") pod \"infra-operator-controller-manager-57548d458d-vdw9h\" (UID: \"98bbf7c3-bf20-4131-8df2-55af39d6c756\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.736815 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4z2pr\" (UniqueName: \"kubernetes.io/projected/06c51319-7e28-41b4-be90-8262eb3b7307-kube-api-access-4z2pr\") pod \"glance-operator-controller-manager-589cbd6b5b-s25dn\" (UID: \"06c51319-7e28-41b4-be90-8262eb3b7307\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.738006 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nh2c\" (UniqueName: \"kubernetes.io/projected/668635d7-22b8-4fa0-8762-4b3c802cf9cb-kube-api-access-2nh2c\") pod \"horizon-operator-controller-manager-5d494799bf-7szzf\" (UID: \"668635d7-22b8-4fa0-8762-4b3c802cf9cb\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.740649 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxjfx\" (UniqueName: \"kubernetes.io/projected/8e942820-209d-40b6-bd79-1836b7af00bb-kube-api-access-dxjfx\") pod \"keystone-operator-controller-manager-7b4567c7cf-bgknz\" (UID: 
\"8e942820-209d-40b6-bd79-1836b7af00bb\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.740882 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-927td" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.741669 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25wjv\" (UniqueName: \"kubernetes.io/projected/2d64e096-3666-4924-b2c3-31584884abb1-kube-api-access-25wjv\") pod \"ironic-operator-controller-manager-67cb4dc6d4-8hkwd\" (UID: \"2d64e096-3666-4924-b2c3-31584884abb1\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.742112 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.743453 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-989d5\" (UniqueName: \"kubernetes.io/projected/3982528b-3a86-43af-a0af-2f0ddd71e349-kube-api-access-989d5\") pod \"manila-operator-controller-manager-5d499bf58b-q6n7b\" (UID: \"3982528b-3a86-43af-a0af-2f0ddd71e349\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.758334 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.779754 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.845278 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.846814 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.848344 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-989d5\" (UniqueName: \"kubernetes.io/projected/3982528b-3a86-43af-a0af-2f0ddd71e349-kube-api-access-989d5\") pod \"manila-operator-controller-manager-5d499bf58b-q6n7b\" (UID: \"3982528b-3a86-43af-a0af-2f0ddd71e349\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.848432 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghv94\" (UniqueName: \"kubernetes.io/projected/8da78b02-ca91-4fca-8710-875bfdd6e6a9-kube-api-access-ghv94\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-z7cq5\" (UID: \"8da78b02-ca91-4fca-8710-875bfdd6e6a9\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.848461 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxjfx\" (UniqueName: \"kubernetes.io/projected/8e942820-209d-40b6-bd79-1836b7af00bb-kube-api-access-dxjfx\") pod \"keystone-operator-controller-manager-7b4567c7cf-bgknz\" (UID: \"8e942820-209d-40b6-bd79-1836b7af00bb\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.854430 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.871039 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-9rlsm" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.886176 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.887536 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-989d5\" (UniqueName: \"kubernetes.io/projected/3982528b-3a86-43af-a0af-2f0ddd71e349-kube-api-access-989d5\") pod \"manila-operator-controller-manager-5d499bf58b-q6n7b\" (UID: \"3982528b-3a86-43af-a0af-2f0ddd71e349\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.887647 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.898222 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxjfx\" (UniqueName: \"kubernetes.io/projected/8e942820-209d-40b6-bd79-1836b7af00bb-kube-api-access-dxjfx\") pod \"keystone-operator-controller-manager-7b4567c7cf-bgknz\" (UID: \"8e942820-209d-40b6-bd79-1836b7af00bb\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.901702 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-74shb" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.902812 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.934780 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.937791 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.944972 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-rxjst" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.945981 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.951985 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.954771 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xddv6\" (UniqueName: \"kubernetes.io/projected/4acfd23c-4a99-4705-9312-fa6e816d7004-kube-api-access-xddv6\") pod \"neutron-operator-controller-manager-6fdcddb789-shv5p\" (UID: \"4acfd23c-4a99-4705-9312-fa6e816d7004\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.954831 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghv94\" (UniqueName: \"kubernetes.io/projected/8da78b02-ca91-4fca-8710-875bfdd6e6a9-kube-api-access-ghv94\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-z7cq5\" (UID: \"8da78b02-ca91-4fca-8710-875bfdd6e6a9\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.963340 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.964506 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.967221 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.969879 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-5b66h" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.970070 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.984810 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.992872 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghv94\" (UniqueName: \"kubernetes.io/projected/8da78b02-ca91-4fca-8710-875bfdd6e6a9-kube-api-access-ghv94\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-z7cq5\" (UID: \"8da78b02-ca91-4fca-8710-875bfdd6e6a9\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5" Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.995434 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8"] Nov 26 14:36:59 crc kubenswrapper[5037]: I1126 14:36:59.997767 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.009863 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.009980 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-cshzf" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.011348 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.015926 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-kgzfm" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.030575 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.048251 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.048482 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.050216 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.060518 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.063166 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.063838 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert\") pod \"infra-operator-controller-manager-57548d458d-vdw9h\" (UID: \"98bbf7c3-bf20-4131-8df2-55af39d6c756\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.063954 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26j56\" (UniqueName: \"kubernetes.io/projected/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-kube-api-access-26j56\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8xwf2x\" (UID: \"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.064028 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2zn9\" (UniqueName: \"kubernetes.io/projected/a84b911c-ef23-4267-bfdb-0b9c9d8b9070-kube-api-access-w2zn9\") pod \"nova-operator-controller-manager-79556f57fc-q4qd8\" (UID: \"a84b911c-ef23-4267-bfdb-0b9c9d8b9070\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8" Nov 26 14:37:00 crc kubenswrapper[5037]: E1126 14:37:00.064045 5037 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.064065 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8nqc\" (UniqueName: \"kubernetes.io/projected/a5ef500e-85f6-4655-af56-720d8e23d4b0-kube-api-access-w8nqc\") pod \"octavia-operator-controller-manager-64cdc6ff96-skfsl\" (UID: \"a5ef500e-85f6-4655-af56-720d8e23d4b0\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" Nov 26 14:37:00 crc kubenswrapper[5037]: E1126 14:37:00.064094 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert podName:98bbf7c3-bf20-4131-8df2-55af39d6c756 nodeName:}" failed. No retries permitted until 2025-11-26 14:37:01.064075815 +0000 UTC m=+1287.860845999 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert") pod "infra-operator-controller-manager-57548d458d-vdw9h" (UID: "98bbf7c3-bf20-4131-8df2-55af39d6c756") : secret "infra-operator-webhook-server-cert" not found Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.064120 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xddv6\" (UniqueName: \"kubernetes.io/projected/4acfd23c-4a99-4705-9312-fa6e816d7004-kube-api-access-xddv6\") pod \"neutron-operator-controller-manager-6fdcddb789-shv5p\" (UID: \"4acfd23c-4a99-4705-9312-fa6e816d7004\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.064240 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8xwf2x\" (UID: \"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.070474 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-wwcg8" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.084923 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.095170 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.096487 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.096899 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.101245 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xddv6\" (UniqueName: \"kubernetes.io/projected/4acfd23c-4a99-4705-9312-fa6e816d7004-kube-api-access-xddv6\") pod \"neutron-operator-controller-manager-6fdcddb789-shv5p\" (UID: \"4acfd23c-4a99-4705-9312-fa6e816d7004\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.104257 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-xj5r9" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.110432 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.128326 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.129977 5037 util.go:30] "No sandbox for pod can be found. 
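The repeated "secret ... not found" / "No retries permitted until ..." pairs for the infra-operator cert volume show the kubelet's per-operation backoff while the webhook certificate secret has not been created yet: the first failure defers the retry by 500ms, the next by 1s, doubling on each attempt. A hedged Go sketch of that doubling-with-cap pattern (the exact cap here is an assumption, not taken from the log):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // retryWithBackoff doubles the delay after each failure, capped at max,
    // mirroring the 500ms -> 1s progression visible in the log above.
    func retryWithBackoff(op func() error, base, max time.Duration, attempts int) error {
        delay := base
        for i := 0; i < attempts; i++ {
            if err := op(); err == nil {
                return nil
            }
            fmt.Printf("attempt %d failed; no retries permitted for %s\n", i+1, delay)
            time.Sleep(delay)
            if delay *= 2; delay > max {
                delay = max
            }
        }
        return errors.New("giving up")
    }

    func main() {
        _ = retryWithBackoff(func() error {
            return errors.New(`secret "infra-operator-webhook-server-cert" not found`)
        }, 500*time.Millisecond, 2*time.Minute, 4)
    }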
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.132316 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-mg9vp" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.135312 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.141473 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.145130 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.150130 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-7m4w2" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.151051 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.171847 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qf2fz\" (UniqueName: \"kubernetes.io/projected/bd47a58c-4525-4ca4-9e18-9971afc83d7a-kube-api-access-qf2fz\") pod \"placement-operator-controller-manager-57988cc5b5-f5hkj\" (UID: \"bd47a58c-4525-4ca4-9e18-9971afc83d7a\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.172188 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26j56\" (UniqueName: \"kubernetes.io/projected/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-kube-api-access-26j56\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8xwf2x\" (UID: \"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.172247 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2zn9\" (UniqueName: \"kubernetes.io/projected/a84b911c-ef23-4267-bfdb-0b9c9d8b9070-kube-api-access-w2zn9\") pod \"nova-operator-controller-manager-79556f57fc-q4qd8\" (UID: \"a84b911c-ef23-4267-bfdb-0b9c9d8b9070\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.172269 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvlvq\" (UniqueName: \"kubernetes.io/projected/7b497253-ea07-43ac-a78f-d2a145344041-kube-api-access-dvlvq\") pod \"swift-operator-controller-manager-d77b94747-9dzl2\" (UID: \"7b497253-ea07-43ac-a78f-d2a145344041\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.172298 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9ssm\" (UniqueName: \"kubernetes.io/projected/9aa6edf8-6550-4e67-a36c-c1821a4e0778-kube-api-access-p9ssm\") pod 
\"ovn-operator-controller-manager-56897c768d-hn8b8\" (UID: \"9aa6edf8-6550-4e67-a36c-c1821a4e0778\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.172333 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8nqc\" (UniqueName: \"kubernetes.io/projected/a5ef500e-85f6-4655-af56-720d8e23d4b0-kube-api-access-w8nqc\") pod \"octavia-operator-controller-manager-64cdc6ff96-skfsl\" (UID: \"a5ef500e-85f6-4655-af56-720d8e23d4b0\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.172360 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8xwf2x\" (UID: \"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:00 crc kubenswrapper[5037]: E1126 14:37:00.172512 5037 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 14:37:00 crc kubenswrapper[5037]: E1126 14:37:00.172561 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert podName:7311c1ce-321d-49a7-b616-6b8f3fb2ce8c nodeName:}" failed. No retries permitted until 2025-11-26 14:37:00.672545275 +0000 UTC m=+1287.469315459 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert") pod "openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" (UID: "7311c1ce-321d-49a7-b616-6b8f3fb2ce8c") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.181054 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.181868 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.184574 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.184744 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.185156 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-f2rz2" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.226849 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.227997 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26j56\" (UniqueName: \"kubernetes.io/projected/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-kube-api-access-26j56\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8xwf2x\" (UID: \"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.235753 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.245739 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8nqc\" (UniqueName: \"kubernetes.io/projected/a5ef500e-85f6-4655-af56-720d8e23d4b0-kube-api-access-w8nqc\") pod \"octavia-operator-controller-manager-64cdc6ff96-skfsl\" (UID: \"a5ef500e-85f6-4655-af56-720d8e23d4b0\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.254174 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2zn9\" (UniqueName: \"kubernetes.io/projected/a84b911c-ef23-4267-bfdb-0b9c9d8b9070-kube-api-access-w2zn9\") pod \"nova-operator-controller-manager-79556f57fc-q4qd8\" (UID: \"a84b911c-ef23-4267-bfdb-0b9c9d8b9070\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.254308 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-k67q7" event={"ID":"4fbefccf-1879-4d21-a312-44f95a16545b","Type":"ContainerStarted","Data":"cc0e0150f26bb226203645c561965d255a480c7e7450b452c06e770f9dd6a7ca"} Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.273821 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhdf2\" (UniqueName: \"kubernetes.io/projected/46f325c2-aa51-4684-aec4-0c31eb822e6d-kube-api-access-qhdf2\") pod \"watcher-operator-controller-manager-656dcb59d4-tsv6x\" (UID: \"46f325c2-aa51-4684-aec4-0c31eb822e6d\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.273886 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qf2fz\" (UniqueName: 
\"kubernetes.io/projected/bd47a58c-4525-4ca4-9e18-9971afc83d7a-kube-api-access-qf2fz\") pod \"placement-operator-controller-manager-57988cc5b5-f5hkj\" (UID: \"bd47a58c-4525-4ca4-9e18-9971afc83d7a\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.273907 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtc4v\" (UniqueName: \"kubernetes.io/projected/63e61192-9513-41ce-a7f9-983264d63ce8-kube-api-access-gtc4v\") pod \"telemetry-operator-controller-manager-76cc84c6bb-wrk4n\" (UID: \"63e61192-9513-41ce-a7f9-983264d63ce8\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.273949 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvlvq\" (UniqueName: \"kubernetes.io/projected/7b497253-ea07-43ac-a78f-d2a145344041-kube-api-access-dvlvq\") pod \"swift-operator-controller-manager-d77b94747-9dzl2\" (UID: \"7b497253-ea07-43ac-a78f-d2a145344041\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.273969 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9ssm\" (UniqueName: \"kubernetes.io/projected/9aa6edf8-6550-4e67-a36c-c1821a4e0778-kube-api-access-p9ssm\") pod \"ovn-operator-controller-manager-56897c768d-hn8b8\" (UID: \"9aa6edf8-6550-4e67-a36c-c1821a4e0778\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.274020 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfjg8\" (UniqueName: \"kubernetes.io/projected/abb38dd1-fa1b-4056-85f7-2ebbe18977b9-kube-api-access-sfjg8\") pod \"test-operator-controller-manager-5cd6c7f4c8-fcz7d\" (UID: \"abb38dd1-fa1b-4056-85f7-2ebbe18977b9\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.274149 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.283045 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.284166 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.292323 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-p9t9j" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.296425 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.300969 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.309909 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-k67q7"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.312447 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9ssm\" (UniqueName: \"kubernetes.io/projected/9aa6edf8-6550-4e67-a36c-c1821a4e0778-kube-api-access-p9ssm\") pod \"ovn-operator-controller-manager-56897c768d-hn8b8\" (UID: \"9aa6edf8-6550-4e67-a36c-c1821a4e0778\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.314105 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvlvq\" (UniqueName: \"kubernetes.io/projected/7b497253-ea07-43ac-a78f-d2a145344041-kube-api-access-dvlvq\") pod \"swift-operator-controller-manager-d77b94747-9dzl2\" (UID: \"7b497253-ea07-43ac-a78f-d2a145344041\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.315509 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qf2fz\" (UniqueName: \"kubernetes.io/projected/bd47a58c-4525-4ca4-9e18-9971afc83d7a-kube-api-access-qf2fz\") pod \"placement-operator-controller-manager-57988cc5b5-f5hkj\" (UID: \"bd47a58c-4525-4ca4-9e18-9971afc83d7a\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.340506 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.348655 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.375165 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhdf2\" (UniqueName: \"kubernetes.io/projected/46f325c2-aa51-4684-aec4-0c31eb822e6d-kube-api-access-qhdf2\") pod \"watcher-operator-controller-manager-656dcb59d4-tsv6x\" (UID: \"46f325c2-aa51-4684-aec4-0c31eb822e6d\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.375800 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtc4v\" (UniqueName: \"kubernetes.io/projected/63e61192-9513-41ce-a7f9-983264d63ce8-kube-api-access-gtc4v\") pod \"telemetry-operator-controller-manager-76cc84c6bb-wrk4n\" (UID: \"63e61192-9513-41ce-a7f9-983264d63ce8\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.375914 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6scv\" (UniqueName: \"kubernetes.io/projected/2446d4a3-2a56-4a21-9726-19cfcfcfd203-kube-api-access-g6scv\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.376037 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.376351 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfjg8\" (UniqueName: \"kubernetes.io/projected/abb38dd1-fa1b-4056-85f7-2ebbe18977b9-kube-api-access-sfjg8\") pod \"test-operator-controller-manager-5cd6c7f4c8-fcz7d\" (UID: \"abb38dd1-fa1b-4056-85f7-2ebbe18977b9\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.376433 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.400442 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtc4v\" (UniqueName: \"kubernetes.io/projected/63e61192-9513-41ce-a7f9-983264d63ce8-kube-api-access-gtc4v\") pod \"telemetry-operator-controller-manager-76cc84c6bb-wrk4n\" (UID: \"63e61192-9513-41ce-a7f9-983264d63ce8\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.406576 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfjg8\" (UniqueName: 
\"kubernetes.io/projected/abb38dd1-fa1b-4056-85f7-2ebbe18977b9-kube-api-access-sfjg8\") pod \"test-operator-controller-manager-5cd6c7f4c8-fcz7d\" (UID: \"abb38dd1-fa1b-4056-85f7-2ebbe18977b9\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.407873 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhdf2\" (UniqueName: \"kubernetes.io/projected/46f325c2-aa51-4684-aec4-0c31eb822e6d-kube-api-access-qhdf2\") pod \"watcher-operator-controller-manager-656dcb59d4-tsv6x\" (UID: \"46f325c2-aa51-4684-aec4-0c31eb822e6d\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.453649 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.474614 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.478406 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tz44q\" (UniqueName: \"kubernetes.io/projected/66ef6065-211b-4aa2-b2f5-6386ee020518-kube-api-access-tz44q\") pod \"rabbitmq-cluster-operator-manager-668c99d594-mjnxg\" (UID: \"66ef6065-211b-4aa2-b2f5-6386ee020518\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.478527 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6scv\" (UniqueName: \"kubernetes.io/projected/2446d4a3-2a56-4a21-9726-19cfcfcfd203-kube-api-access-g6scv\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.478553 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.478606 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:00 crc kubenswrapper[5037]: E1126 14:37:00.478718 5037 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 14:37:00 crc kubenswrapper[5037]: E1126 14:37:00.478775 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs podName:2446d4a3-2a56-4a21-9726-19cfcfcfd203 nodeName:}" failed. 
No retries permitted until 2025-11-26 14:37:00.978750625 +0000 UTC m=+1287.775520809 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs") pod "openstack-operator-controller-manager-659d75f7c6-7qgq6" (UID: "2446d4a3-2a56-4a21-9726-19cfcfcfd203") : secret "webhook-server-cert" not found Nov 26 14:37:00 crc kubenswrapper[5037]: E1126 14:37:00.478881 5037 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 14:37:00 crc kubenswrapper[5037]: E1126 14:37:00.478952 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs podName:2446d4a3-2a56-4a21-9726-19cfcfcfd203 nodeName:}" failed. No retries permitted until 2025-11-26 14:37:00.9789312 +0000 UTC m=+1287.775701384 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs") pod "openstack-operator-controller-manager-659d75f7c6-7qgq6" (UID: "2446d4a3-2a56-4a21-9726-19cfcfcfd203") : secret "metrics-server-cert" not found Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.501435 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6scv\" (UniqueName: \"kubernetes.io/projected/2446d4a3-2a56-4a21-9726-19cfcfcfd203-kube-api-access-g6scv\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.515985 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.582520 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tz44q\" (UniqueName: \"kubernetes.io/projected/66ef6065-211b-4aa2-b2f5-6386ee020518-kube-api-access-tz44q\") pod \"rabbitmq-cluster-operator-manager-668c99d594-mjnxg\" (UID: \"66ef6065-211b-4aa2-b2f5-6386ee020518\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.615524 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tz44q\" (UniqueName: \"kubernetes.io/projected/66ef6065-211b-4aa2-b2f5-6386ee020518-kube-api-access-tz44q\") pod \"rabbitmq-cluster-operator-manager-668c99d594-mjnxg\" (UID: \"66ef6065-211b-4aa2-b2f5-6386ee020518\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.645823 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.653496 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.673729 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg" Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.684027 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8xwf2x\" (UID: \"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:00 crc kubenswrapper[5037]: E1126 14:37:00.684313 5037 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 14:37:00 crc kubenswrapper[5037]: E1126 14:37:00.684387 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert podName:7311c1ce-321d-49a7-b616-6b8f3fb2ce8c nodeName:}" failed. No retries permitted until 2025-11-26 14:37:01.684362827 +0000 UTC m=+1288.481133011 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert") pod "openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" (UID: "7311c1ce-321d-49a7-b616-6b8f3fb2ce8c") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.697061 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-927td"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.870775 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.895092 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn"] Nov 26 14:37:00 crc kubenswrapper[5037]: I1126 14:37:00.916425 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf"] Nov 26 14:37:00 crc kubenswrapper[5037]: W1126 14:37:00.963861 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod668635d7_22b8_4fa0_8762_4b3c802cf9cb.slice/crio-983689d8b3a72d386cd522962f5ed979c03ea2cdeb2b7dd4685e7800566a0fab WatchSource:0}: Error finding container 983689d8b3a72d386cd522962f5ed979c03ea2cdeb2b7dd4685e7800566a0fab: Status 404 returned error can't find the container with id 983689d8b3a72d386cd522962f5ed979c03ea2cdeb2b7dd4685e7800566a0fab Nov 26 14:37:00 crc kubenswrapper[5037]: W1126 14:37:00.966377 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddad0150e_fc25_4245_ad22_e940fadd107a.slice/crio-13ed4b1d9f2a39a0f1528ab69e70a31d5e880021d3a358980f8546ee79b617bf WatchSource:0}: Error finding container 13ed4b1d9f2a39a0f1528ab69e70a31d5e880021d3a358980f8546ee79b617bf: Status 404 returned error can't find the container with id 13ed4b1d9f2a39a0f1528ab69e70a31d5e880021d3a358980f8546ee79b617bf Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.001936 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.002100 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.002141 5037 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.002214 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs podName:2446d4a3-2a56-4a21-9726-19cfcfcfd203 nodeName:}" failed. No retries permitted until 2025-11-26 14:37:02.002193021 +0000 UTC m=+1288.798963205 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs") pod "openstack-operator-controller-manager-659d75f7c6-7qgq6" (UID: "2446d4a3-2a56-4a21-9726-19cfcfcfd203") : secret "webhook-server-cert" not found Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.002234 5037 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.002312 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs podName:2446d4a3-2a56-4a21-9726-19cfcfcfd203 nodeName:}" failed. No retries permitted until 2025-11-26 14:37:02.002272964 +0000 UTC m=+1288.799043148 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs") pod "openstack-operator-controller-manager-659d75f7c6-7qgq6" (UID: "2446d4a3-2a56-4a21-9726-19cfcfcfd203") : secret "metrics-server-cert" not found Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.055200 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz"] Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.082471 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5"] Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.091102 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p"] Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.096417 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd"] Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.105611 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert\") pod \"infra-operator-controller-manager-57548d458d-vdw9h\" (UID: \"98bbf7c3-bf20-4131-8df2-55af39d6c756\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.105793 5037 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.105843 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert podName:98bbf7c3-bf20-4131-8df2-55af39d6c756 nodeName:}" failed. No retries permitted until 2025-11-26 14:37:03.105827912 +0000 UTC m=+1289.902598096 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert") pod "infra-operator-controller-manager-57548d458d-vdw9h" (UID: "98bbf7c3-bf20-4131-8df2-55af39d6c756") : secret "infra-operator-webhook-server-cert" not found Nov 26 14:37:01 crc kubenswrapper[5037]: W1126 14:37:01.125198 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d64e096_3666_4924_b2c3_31584884abb1.slice/crio-94c497c74ccf7560b76720c4d84a78e09cb1644e04ebeefd7cfad53ad01a76d7 WatchSource:0}: Error finding container 94c497c74ccf7560b76720c4d84a78e09cb1644e04ebeefd7cfad53ad01a76d7: Status 404 returned error can't find the container with id 94c497c74ccf7560b76720c4d84a78e09cb1644e04ebeefd7cfad53ad01a76d7 Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.143449 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8"] Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.217595 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b"] Nov 26 14:37:01 crc kubenswrapper[5037]: W1126 14:37:01.219599 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3982528b_3a86_43af_a0af_2f0ddd71e349.slice/crio-11e200945848ae8c3c117a95e160c856a7eb36c673ca431992f46f95a5ecdc2c WatchSource:0}: Error finding container 11e200945848ae8c3c117a95e160c856a7eb36c673ca431992f46f95a5ecdc2c: Status 404 returned error can't find the container with id 11e200945848ae8c3c117a95e160c856a7eb36c673ca431992f46f95a5ecdc2c Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.265678 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65" event={"ID":"dad0150e-fc25-4245-ad22-e940fadd107a","Type":"ContainerStarted","Data":"13ed4b1d9f2a39a0f1528ab69e70a31d5e880021d3a358980f8546ee79b617bf"} Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.270073 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b" event={"ID":"3982528b-3a86-43af-a0af-2f0ddd71e349","Type":"ContainerStarted","Data":"11e200945848ae8c3c117a95e160c856a7eb36c673ca431992f46f95a5ecdc2c"} Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.271573 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-927td" event={"ID":"aa8c9234-d8b0-4975-b4c4-83496196179f","Type":"ContainerStarted","Data":"906f0edeffc0b5e3709b2b82bdb15744f80d1957401b4b7a6b88a38ec1b782d0"} Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.272555 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx" event={"ID":"4d00a1ec-3ee8-4166-b497-e96629f2e92a","Type":"ContainerStarted","Data":"6dd3ac03e19e92b4ec3d354eb45606745110bf7a015e468ebd2f5c16566cd779"} Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.273793 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz" event={"ID":"8e942820-209d-40b6-bd79-1836b7af00bb","Type":"ContainerStarted","Data":"51fd8f70696a5b16646779f618ce377b0a48bf401bb47b4ed7dd57465b2ca95c"} Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 
14:37:01.277890 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5" event={"ID":"8da78b02-ca91-4fca-8710-875bfdd6e6a9","Type":"ContainerStarted","Data":"ad24648593b511f66b549ae265419f5463a5e23574d3026b164c1a4abdcc21cd"} Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.279790 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p" event={"ID":"4acfd23c-4a99-4705-9312-fa6e816d7004","Type":"ContainerStarted","Data":"ca57739f6eeebba5d62ef9bf68791c7f6a21c1ce120791dbb6f1fd591ba02c77"} Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.282516 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd" event={"ID":"2d64e096-3666-4924-b2c3-31584884abb1","Type":"ContainerStarted","Data":"94c497c74ccf7560b76720c4d84a78e09cb1644e04ebeefd7cfad53ad01a76d7"} Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.284994 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf" event={"ID":"668635d7-22b8-4fa0-8762-4b3c802cf9cb","Type":"ContainerStarted","Data":"983689d8b3a72d386cd522962f5ed979c03ea2cdeb2b7dd4685e7800566a0fab"} Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.286062 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8" event={"ID":"9aa6edf8-6550-4e67-a36c-c1821a4e0778","Type":"ContainerStarted","Data":"ba1c09ff5fed786a1f5887fa14163b1cd7d3a1ea66e63e449d7e627772ed6691"} Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.290748 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn" event={"ID":"06c51319-7e28-41b4-be90-8262eb3b7307","Type":"ContainerStarted","Data":"5dae08534a87d34f670b22d161e374093a9f64cc5dded287b2ed0ce40eaa7c9f"} Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.347974 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8"] Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.367820 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl"] Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.372548 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-w8nqc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-64cdc6ff96-skfsl_openstack-operators(a5ef500e-85f6-4655-af56-720d8e23d4b0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.378099 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-w8nqc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-64cdc6ff96-skfsl_openstack-operators(a5ef500e-85f6-4655-af56-720d8e23d4b0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.379797 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sfjg8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-fcz7d_openstack-operators(abb38dd1-fa1b-4056-85f7-2ebbe18977b9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.380146 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" podUID="a5ef500e-85f6-4655-af56-720d8e23d4b0" Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.383576 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sfjg8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-fcz7d_openstack-operators(abb38dd1-fa1b-4056-85f7-2ebbe18977b9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.385078 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" podUID="abb38dd1-fa1b-4056-85f7-2ebbe18977b9" Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.389957 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d"] Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.394533 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qf2fz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-f5hkj_openstack-operators(bd47a58c-4525-4ca4-9e18-9971afc83d7a): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 14:37:01 crc kubenswrapper[5037]: W1126 14:37:01.413946 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7b497253_ea07_43ac_a78f_d2a145344041.slice/crio-49c90a3cb729875a2fc31e17ba7e9f5350afb948570b7a0c21ba9b100f8f547b WatchSource:0}: Error finding container 49c90a3cb729875a2fc31e17ba7e9f5350afb948570b7a0c21ba9b100f8f547b: Status 404 returned error can't find the container with id 49c90a3cb729875a2fc31e17ba7e9f5350afb948570b7a0c21ba9b100f8f547b Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.426865 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dvlvq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-9dzl2_openstack-operators(7b497253-ea07-43ac-a78f-d2a145344041): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.426997 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2"] Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.432433 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dvlvq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-9dzl2_openstack-operators(7b497253-ea07-43ac-a78f-d2a145344041): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.433651 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" podUID="7b497253-ea07-43ac-a78f-d2a145344041" Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.444179 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj"] Nov 26 14:37:01 crc 
kubenswrapper[5037]: I1126 14:37:01.547819 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n"] Nov 26 14:37:01 crc kubenswrapper[5037]: W1126 14:37:01.556859 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66ef6065_211b_4aa2_b2f5_6386ee020518.slice/crio-bf836642db5233d1cce045eabf98221e53eacd06da379eae170465b0460c0874 WatchSource:0}: Error finding container bf836642db5233d1cce045eabf98221e53eacd06da379eae170465b0460c0874: Status 404 returned error can't find the container with id bf836642db5233d1cce045eabf98221e53eacd06da379eae170465b0460c0874 Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.560738 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tz44q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-mjnxg_openstack-operators(66ef6065-211b-4aa2-b2f5-6386ee020518): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.561831 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg" podUID="66ef6065-211b-4aa2-b2f5-6386ee020518" Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.561956 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect 
--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gtc4v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-wrk4n_openstack-operators(63e61192-9513-41ce-a7f9-983264d63ce8): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.563792 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gtc4v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-wrk4n_openstack-operators(63e61192-9513-41ce-a7f9-983264d63ce8): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.565496 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" podUID="63e61192-9513-41ce-a7f9-983264d63ce8" Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.570754 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg"] Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.575626 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x"] Nov 26 14:37:01 crc kubenswrapper[5037]: I1126 14:37:01.731867 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8xwf2x\" (UID: \"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.732111 5037 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 14:37:01 crc kubenswrapper[5037]: E1126 14:37:01.732162 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert podName:7311c1ce-321d-49a7-b616-6b8f3fb2ce8c nodeName:}" failed. No retries permitted until 2025-11-26 14:37:03.732146762 +0000 UTC m=+1290.528916946 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert") pod "openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" (UID: "7311c1ce-321d-49a7-b616-6b8f3fb2ce8c") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 14:37:02 crc kubenswrapper[5037]: I1126 14:37:02.036256 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:02 crc kubenswrapper[5037]: E1126 14:37:02.036719 5037 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 14:37:02 crc kubenswrapper[5037]: E1126 14:37:02.040613 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs podName:2446d4a3-2a56-4a21-9726-19cfcfcfd203 nodeName:}" failed. No retries permitted until 2025-11-26 14:37:04.040578396 +0000 UTC m=+1290.837348740 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs") pod "openstack-operator-controller-manager-659d75f7c6-7qgq6" (UID: "2446d4a3-2a56-4a21-9726-19cfcfcfd203") : secret "metrics-server-cert" not found Nov 26 14:37:02 crc kubenswrapper[5037]: I1126 14:37:02.040666 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:02 crc kubenswrapper[5037]: E1126 14:37:02.040907 5037 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 14:37:02 crc kubenswrapper[5037]: E1126 14:37:02.041020 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs podName:2446d4a3-2a56-4a21-9726-19cfcfcfd203 nodeName:}" failed. No retries permitted until 2025-11-26 14:37:04.040990066 +0000 UTC m=+1290.837760380 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs") pod "openstack-operator-controller-manager-659d75f7c6-7qgq6" (UID: "2446d4a3-2a56-4a21-9726-19cfcfcfd203") : secret "webhook-server-cert" not found Nov 26 14:37:02 crc kubenswrapper[5037]: I1126 14:37:02.306210 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" event={"ID":"63e61192-9513-41ce-a7f9-983264d63ce8","Type":"ContainerStarted","Data":"23323e5da9947574dc1bba774ca7aae873559b7c508278d6bdace99245d60593"} Nov 26 14:37:02 crc kubenswrapper[5037]: E1126 14:37:02.309478 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" podUID="63e61192-9513-41ce-a7f9-983264d63ce8" Nov 26 14:37:02 crc kubenswrapper[5037]: I1126 14:37:02.310451 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8" event={"ID":"a84b911c-ef23-4267-bfdb-0b9c9d8b9070","Type":"ContainerStarted","Data":"2201a6969f9117da4aa3a5af3d891c7ecfefb343cfd40e7d7d49871e1ff603f0"} Nov 26 14:37:02 crc kubenswrapper[5037]: I1126 14:37:02.312469 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" event={"ID":"abb38dd1-fa1b-4056-85f7-2ebbe18977b9","Type":"ContainerStarted","Data":"1a264e6f219763f85503c79bb9b7c1c2185a70b35d72661d9b524734b503a560"} Nov 26 14:37:02 crc kubenswrapper[5037]: I1126 14:37:02.325315 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg" event={"ID":"66ef6065-211b-4aa2-b2f5-6386ee020518","Type":"ContainerStarted","Data":"bf836642db5233d1cce045eabf98221e53eacd06da379eae170465b0460c0874"} Nov 26 14:37:02 crc kubenswrapper[5037]: E1126 14:37:02.327814 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" podUID="abb38dd1-fa1b-4056-85f7-2ebbe18977b9" Nov 26 14:37:02 crc kubenswrapper[5037]: E1126 14:37:02.332499 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg" podUID="66ef6065-211b-4aa2-b2f5-6386ee020518" Nov 26 14:37:02 crc kubenswrapper[5037]: I1126 14:37:02.332587 5037 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj" event={"ID":"bd47a58c-4525-4ca4-9e18-9971afc83d7a","Type":"ContainerStarted","Data":"13dac1ae5cb7af0f341ce829c36778d3e94460889f2bc156b93a4763f86b695b"} Nov 26 14:37:02 crc kubenswrapper[5037]: I1126 14:37:02.354232 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" event={"ID":"7b497253-ea07-43ac-a78f-d2a145344041","Type":"ContainerStarted","Data":"49c90a3cb729875a2fc31e17ba7e9f5350afb948570b7a0c21ba9b100f8f547b"} Nov 26 14:37:02 crc kubenswrapper[5037]: E1126 14:37:02.357067 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" podUID="7b497253-ea07-43ac-a78f-d2a145344041" Nov 26 14:37:02 crc kubenswrapper[5037]: I1126 14:37:02.357505 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" event={"ID":"a5ef500e-85f6-4655-af56-720d8e23d4b0","Type":"ContainerStarted","Data":"e23c294fea05e81af0880675469ab2e7f0ca2a511b41829c043bcafe56c8f296"} Nov 26 14:37:02 crc kubenswrapper[5037]: E1126 14:37:02.359222 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" podUID="a5ef500e-85f6-4655-af56-720d8e23d4b0" Nov 26 14:37:02 crc kubenswrapper[5037]: I1126 14:37:02.360349 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x" event={"ID":"46f325c2-aa51-4684-aec4-0c31eb822e6d","Type":"ContainerStarted","Data":"b785f94ba2f704f8ec48ac607d7518e1de05d7e8b3cc11eadb797bc77b60361f"} Nov 26 14:37:03 crc kubenswrapper[5037]: I1126 14:37:03.175029 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert\") pod \"infra-operator-controller-manager-57548d458d-vdw9h\" (UID: \"98bbf7c3-bf20-4131-8df2-55af39d6c756\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:37:03 crc kubenswrapper[5037]: E1126 14:37:03.175343 5037 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 14:37:03 crc kubenswrapper[5037]: E1126 14:37:03.175582 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert podName:98bbf7c3-bf20-4131-8df2-55af39d6c756 nodeName:}" failed. 
No retries permitted until 2025-11-26 14:37:07.17554469 +0000 UTC m=+1293.972315034 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert") pod "infra-operator-controller-manager-57548d458d-vdw9h" (UID: "98bbf7c3-bf20-4131-8df2-55af39d6c756") : secret "infra-operator-webhook-server-cert" not found Nov 26 14:37:03 crc kubenswrapper[5037]: E1126 14:37:03.370272 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg" podUID="66ef6065-211b-4aa2-b2f5-6386ee020518" Nov 26 14:37:03 crc kubenswrapper[5037]: E1126 14:37:03.371517 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" podUID="63e61192-9513-41ce-a7f9-983264d63ce8" Nov 26 14:37:03 crc kubenswrapper[5037]: E1126 14:37:03.371692 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" podUID="a5ef500e-85f6-4655-af56-720d8e23d4b0" Nov 26 14:37:03 crc kubenswrapper[5037]: E1126 14:37:03.371853 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" podUID="abb38dd1-fa1b-4056-85f7-2ebbe18977b9" Nov 26 14:37:03 crc kubenswrapper[5037]: E1126 14:37:03.372315 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" podUID="7b497253-ea07-43ac-a78f-d2a145344041" Nov 26 14:37:03 crc kubenswrapper[5037]: I1126 14:37:03.787420 5037 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8xwf2x\" (UID: \"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:03 crc kubenswrapper[5037]: E1126 14:37:03.787670 5037 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 14:37:03 crc kubenswrapper[5037]: E1126 14:37:03.787827 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert podName:7311c1ce-321d-49a7-b616-6b8f3fb2ce8c nodeName:}" failed. No retries permitted until 2025-11-26 14:37:07.787745914 +0000 UTC m=+1294.584516148 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert") pod "openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" (UID: "7311c1ce-321d-49a7-b616-6b8f3fb2ce8c") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 14:37:04 crc kubenswrapper[5037]: E1126 14:37:04.091777 5037 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 14:37:04 crc kubenswrapper[5037]: E1126 14:37:04.092081 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs podName:2446d4a3-2a56-4a21-9726-19cfcfcfd203 nodeName:}" failed. No retries permitted until 2025-11-26 14:37:08.092064848 +0000 UTC m=+1294.888835032 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs") pod "openstack-operator-controller-manager-659d75f7c6-7qgq6" (UID: "2446d4a3-2a56-4a21-9726-19cfcfcfd203") : secret "webhook-server-cert" not found Nov 26 14:37:04 crc kubenswrapper[5037]: I1126 14:37:04.091653 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:04 crc kubenswrapper[5037]: I1126 14:37:04.094031 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:04 crc kubenswrapper[5037]: E1126 14:37:04.094927 5037 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 14:37:04 crc kubenswrapper[5037]: E1126 14:37:04.094975 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs podName:2446d4a3-2a56-4a21-9726-19cfcfcfd203 nodeName:}" failed. 
No retries permitted until 2025-11-26 14:37:08.094960178 +0000 UTC m=+1294.891730362 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs") pod "openstack-operator-controller-manager-659d75f7c6-7qgq6" (UID: "2446d4a3-2a56-4a21-9726-19cfcfcfd203") : secret "metrics-server-cert" not found Nov 26 14:37:07 crc kubenswrapper[5037]: I1126 14:37:07.243748 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert\") pod \"infra-operator-controller-manager-57548d458d-vdw9h\" (UID: \"98bbf7c3-bf20-4131-8df2-55af39d6c756\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:37:07 crc kubenswrapper[5037]: E1126 14:37:07.244001 5037 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 14:37:07 crc kubenswrapper[5037]: E1126 14:37:07.244147 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert podName:98bbf7c3-bf20-4131-8df2-55af39d6c756 nodeName:}" failed. No retries permitted until 2025-11-26 14:37:15.244115084 +0000 UTC m=+1302.040885308 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert") pod "infra-operator-controller-manager-57548d458d-vdw9h" (UID: "98bbf7c3-bf20-4131-8df2-55af39d6c756") : secret "infra-operator-webhook-server-cert" not found Nov 26 14:37:07 crc kubenswrapper[5037]: I1126 14:37:07.852339 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8xwf2x\" (UID: \"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:07 crc kubenswrapper[5037]: E1126 14:37:07.852553 5037 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 14:37:07 crc kubenswrapper[5037]: E1126 14:37:07.852843 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert podName:7311c1ce-321d-49a7-b616-6b8f3fb2ce8c nodeName:}" failed. No retries permitted until 2025-11-26 14:37:15.852822873 +0000 UTC m=+1302.649593057 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert") pod "openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" (UID: "7311c1ce-321d-49a7-b616-6b8f3fb2ce8c") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 14:37:08 crc kubenswrapper[5037]: I1126 14:37:08.156849 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:08 crc kubenswrapper[5037]: I1126 14:37:08.156942 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:08 crc kubenswrapper[5037]: E1126 14:37:08.157018 5037 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 14:37:08 crc kubenswrapper[5037]: E1126 14:37:08.157079 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs podName:2446d4a3-2a56-4a21-9726-19cfcfcfd203 nodeName:}" failed. No retries permitted until 2025-11-26 14:37:16.157062615 +0000 UTC m=+1302.953832799 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs") pod "openstack-operator-controller-manager-659d75f7c6-7qgq6" (UID: "2446d4a3-2a56-4a21-9726-19cfcfcfd203") : secret "metrics-server-cert" not found Nov 26 14:37:08 crc kubenswrapper[5037]: E1126 14:37:08.157091 5037 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 14:37:08 crc kubenswrapper[5037]: E1126 14:37:08.157128 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs podName:2446d4a3-2a56-4a21-9726-19cfcfcfd203 nodeName:}" failed. No retries permitted until 2025-11-26 14:37:16.157117556 +0000 UTC m=+1302.953887740 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs") pod "openstack-operator-controller-manager-659d75f7c6-7qgq6" (UID: "2446d4a3-2a56-4a21-9726-19cfcfcfd203") : secret "webhook-server-cert" not found Nov 26 14:37:11 crc kubenswrapper[5037]: I1126 14:37:11.247503 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:37:11 crc kubenswrapper[5037]: I1126 14:37:11.247583 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:37:11 crc kubenswrapper[5037]: I1126 14:37:11.247650 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:37:11 crc kubenswrapper[5037]: I1126 14:37:11.248441 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"302cbe16bdb6c8873822bf0697d168f893d8457e80a7e1227846608f32db69c8"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 14:37:11 crc kubenswrapper[5037]: I1126 14:37:11.248536 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://302cbe16bdb6c8873822bf0697d168f893d8457e80a7e1227846608f32db69c8" gracePeriod=600 Nov 26 14:37:13 crc kubenswrapper[5037]: I1126 14:37:13.466167 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="302cbe16bdb6c8873822bf0697d168f893d8457e80a7e1227846608f32db69c8" exitCode=0 Nov 26 14:37:13 crc kubenswrapper[5037]: I1126 14:37:13.466248 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"302cbe16bdb6c8873822bf0697d168f893d8457e80a7e1227846608f32db69c8"} Nov 26 14:37:13 crc kubenswrapper[5037]: I1126 14:37:13.466539 5037 scope.go:117] "RemoveContainer" containerID="36a7c42fc7524fe0b2d1a2075eae30f52f037f2969e9db7800448ccd49cfcc57" Nov 26 14:37:14 crc kubenswrapper[5037]: E1126 14:37:14.025786 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf" Nov 26 14:37:14 crc kubenswrapper[5037]: E1126 14:37:14.026015 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qhdf2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-tsv6x_openstack-operators(46f325c2-aa51-4684-aec4-0c31eb822e6d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 14:37:14 crc kubenswrapper[5037]: I1126 14:37:14.047222 5037 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 14:37:14 crc kubenswrapper[5037]: E1126 14:37:14.462758 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:e00a9ed0ab26c5b745bd804ab1fe6b22428d026f17ea05a05f045e060342f46c" Nov 26 14:37:14 crc kubenswrapper[5037]: E1126 14:37:14.462972 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:e00a9ed0ab26c5b745bd804ab1fe6b22428d026f17ea05a05f045e060342f46c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} 
BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xddv6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-6fdcddb789-shv5p_openstack-operators(4acfd23c-4a99-4705-9312-fa6e816d7004): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 14:37:15 crc kubenswrapper[5037]: E1126 14:37:15.007792 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:2ee37ff474bee3203447df4f326a9279a515e770573153338296dd074722c677" Nov 26 14:37:15 crc kubenswrapper[5037]: E1126 14:37:15.007998 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:2ee37ff474bee3203447df4f326a9279a515e770573153338296dd074722c677,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pfh5c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5b77f656f-927td_openstack-operators(aa8c9234-d8b0-4975-b4c4-83496196179f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 14:37:15 crc kubenswrapper[5037]: I1126 14:37:15.271835 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert\") pod \"infra-operator-controller-manager-57548d458d-vdw9h\" (UID: \"98bbf7c3-bf20-4131-8df2-55af39d6c756\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:37:15 crc kubenswrapper[5037]: I1126 14:37:15.290166 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98bbf7c3-bf20-4131-8df2-55af39d6c756-cert\") pod \"infra-operator-controller-manager-57548d458d-vdw9h\" (UID: \"98bbf7c3-bf20-4131-8df2-55af39d6c756\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:37:15 crc kubenswrapper[5037]: I1126 14:37:15.308854 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:37:15 crc kubenswrapper[5037]: I1126 14:37:15.880829 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8xwf2x\" (UID: \"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:15 crc kubenswrapper[5037]: I1126 14:37:15.884840 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7311c1ce-321d-49a7-b616-6b8f3fb2ce8c-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8xwf2x\" (UID: \"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:15 crc kubenswrapper[5037]: I1126 14:37:15.913705 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:16 crc kubenswrapper[5037]: I1126 14:37:16.186887 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:16 crc kubenswrapper[5037]: I1126 14:37:16.187130 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:16 crc kubenswrapper[5037]: I1126 14:37:16.209729 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:16 crc kubenswrapper[5037]: I1126 14:37:16.220036 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2446d4a3-2a56-4a21-9726-19cfcfcfd203-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-7qgq6\" (UID: \"2446d4a3-2a56-4a21-9726-19cfcfcfd203\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:16 crc kubenswrapper[5037]: I1126 14:37:16.260836 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:22 crc kubenswrapper[5037]: E1126 14:37:22.381536 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2" Nov 26 14:37:22 crc kubenswrapper[5037]: E1126 14:37:22.383955 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ghv94,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-66f4dd4bc7-z7cq5_openstack-operators(8da78b02-ca91-4fca-8710-875bfdd6e6a9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 14:37:22 crc kubenswrapper[5037]: E1126 14:37:22.664829 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:3dbf9fd9dce75f1fb250ee4c4097ad77d2f34110b61d85e37abd9c472e022e6c" Nov 26 14:37:22 crc kubenswrapper[5037]: E1126 14:37:22.665058 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:3dbf9fd9dce75f1fb250ee4c4097ad77d2f34110b61d85e37abd9c472e022e6c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bv7km,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7b64f4fb85-7qg65_openstack-operators(dad0150e-fc25-4245-ad22-e940fadd107a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 14:37:23 crc kubenswrapper[5037]: E1126 14:37:23.309629 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6" Nov 26 14:37:23 crc kubenswrapper[5037]: E1126 14:37:23.310214 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: 
{{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-989d5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5d499bf58b-q6n7b_openstack-operators(3982528b-3a86-43af-a0af-2f0ddd71e349): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 14:37:25 crc kubenswrapper[5037]: E1126 14:37:25.787610 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711" Nov 26 14:37:25 crc kubenswrapper[5037]: E1126 14:37:25.787902 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dxjfx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7b4567c7cf-bgknz_openstack-operators(8e942820-209d-40b6-bd79-1836b7af00bb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 14:37:30 crc kubenswrapper[5037]: I1126 14:37:30.767117 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h"] Nov 26 14:37:31 crc kubenswrapper[5037]: I1126 14:37:31.990246 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x"] Nov 26 14:37:32 crc kubenswrapper[5037]: I1126 14:37:32.065915 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6"] Nov 26 14:37:32 crc kubenswrapper[5037]: I1126 14:37:32.635012 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" event={"ID":"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c","Type":"ContainerStarted","Data":"9d5f9138ccba16876cf24b8478f81e0aa6e27ed66f6be74bb7ce4ddd39c4f8d9"} Nov 26 14:37:32 crc kubenswrapper[5037]: I1126 14:37:32.636579 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" event={"ID":"98bbf7c3-bf20-4131-8df2-55af39d6c756","Type":"ContainerStarted","Data":"8db19b44b04401c33c576069d1fee12ac08d7f0ebb5cbfa0cfb37d663cc0b203"} Nov 26 14:37:32 crc kubenswrapper[5037]: I1126 14:37:32.640359 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181"} Nov 26 14:37:32 crc kubenswrapper[5037]: W1126 14:37:32.898717 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2446d4a3_2a56_4a21_9726_19cfcfcfd203.slice/crio-d469ff65d9a310ec83fe9e961fba28679c55801ce379b152d4902342e9750d8d WatchSource:0}: Error finding container d469ff65d9a310ec83fe9e961fba28679c55801ce379b152d4902342e9750d8d: Status 404 returned error can't find the container with id d469ff65d9a310ec83fe9e961fba28679c55801ce379b152d4902342e9750d8d Nov 26 14:37:32 crc 
kubenswrapper[5037]: E1126 14:37:32.978020 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 26 14:37:32 crc kubenswrapper[5037]: E1126 14:37:32.978249 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qf2fz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-f5hkj_openstack-operators(bd47a58c-4525-4ca4-9e18-9971afc83d7a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 14:37:32 crc kubenswrapper[5037]: E1126 14:37:32.979543 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj" podUID="bd47a58c-4525-4ca4-9e18-9971afc83d7a" Nov 26 14:37:33 crc kubenswrapper[5037]: I1126 14:37:33.695645 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn" event={"ID":"06c51319-7e28-41b4-be90-8262eb3b7307","Type":"ContainerStarted","Data":"1898ec1ee5315ab5a6fa7a5a76a0f5b66f62caeffa23836f806564ce084459d8"} Nov 26 14:37:34 crc kubenswrapper[5037]: I1126 14:37:33.725137 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-k67q7" event={"ID":"4fbefccf-1879-4d21-a312-44f95a16545b","Type":"ContainerStarted","Data":"7f4d89e320adf938f5f635245610a7e6681dff81abc22518e3ef4278c5d48dc0"} Nov 26 14:37:34 crc kubenswrapper[5037]: I1126 14:37:33.740273 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx" event={"ID":"4d00a1ec-3ee8-4166-b497-e96629f2e92a","Type":"ContainerStarted","Data":"b61e7ef727f146f70870c6843b50459a2b0cb59c7da459d4e84970b2951b063c"} Nov 26 14:37:34 crc 
kubenswrapper[5037]: I1126 14:37:33.755424 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf" event={"ID":"668635d7-22b8-4fa0-8762-4b3c802cf9cb","Type":"ContainerStarted","Data":"29b5c00d6ce8f728b8617686a3a16f3879185332f38c42295ec2866d1abd98f3"} Nov 26 14:37:34 crc kubenswrapper[5037]: I1126 14:37:33.764053 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" event={"ID":"7b497253-ea07-43ac-a78f-d2a145344041","Type":"ContainerStarted","Data":"e79756ed2ac76cd901fd0b09b4f63d25f1817a7c4280b8af67ce33c4299338e4"} Nov 26 14:37:34 crc kubenswrapper[5037]: I1126 14:37:33.766758 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8" event={"ID":"9aa6edf8-6550-4e67-a36c-c1821a4e0778","Type":"ContainerStarted","Data":"ef9a6887c3f4ea01d8519d1cbe527ce551ec046c310b2f4e7a762359ab953d0e"} Nov 26 14:37:34 crc kubenswrapper[5037]: I1126 14:37:33.769668 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd" event={"ID":"2d64e096-3666-4924-b2c3-31584884abb1","Type":"ContainerStarted","Data":"d62bea828c72d8582e0a0c980c468098c3e7840c16f0acdf68616f475de675f0"} Nov 26 14:37:34 crc kubenswrapper[5037]: I1126 14:37:33.773130 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8" event={"ID":"a84b911c-ef23-4267-bfdb-0b9c9d8b9070","Type":"ContainerStarted","Data":"157500b81454cb17760f657d6af2ba2a1d57bf317572a2c7607aded7fcde87b5"} Nov 26 14:37:34 crc kubenswrapper[5037]: I1126 14:37:33.777366 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" event={"ID":"2446d4a3-2a56-4a21-9726-19cfcfcfd203","Type":"ContainerStarted","Data":"f679fe3879d9fb3cc8b24d604daa7f081297693450dd99ad37c45c733efe8632"} Nov 26 14:37:34 crc kubenswrapper[5037]: I1126 14:37:33.777397 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" event={"ID":"2446d4a3-2a56-4a21-9726-19cfcfcfd203","Type":"ContainerStarted","Data":"d469ff65d9a310ec83fe9e961fba28679c55801ce379b152d4902342e9750d8d"} Nov 26 14:37:34 crc kubenswrapper[5037]: I1126 14:37:33.778038 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:34 crc kubenswrapper[5037]: I1126 14:37:33.814690 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" podStartSLOduration=34.814661361 podStartE2EDuration="34.814661361s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:37:33.8047935 +0000 UTC m=+1320.601563684" watchObservedRunningTime="2025-11-26 14:37:33.814661361 +0000 UTC m=+1320.611431555" Nov 26 14:37:34 crc kubenswrapper[5037]: I1126 14:37:34.875411 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" 
event={"ID":"abb38dd1-fa1b-4056-85f7-2ebbe18977b9","Type":"ContainerStarted","Data":"ee7259640e5a2fb988527bf79cd8ae4339b048e43a5c26572c28e3a60c7b3ff3"} Nov 26 14:37:34 crc kubenswrapper[5037]: I1126 14:37:34.881742 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" event={"ID":"a5ef500e-85f6-4655-af56-720d8e23d4b0","Type":"ContainerStarted","Data":"b91082803048e589d4bde2e63070c9b20580ac30f57506f769e9afc6cf47a30d"} Nov 26 14:37:34 crc kubenswrapper[5037]: I1126 14:37:34.933356 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" event={"ID":"63e61192-9513-41ce-a7f9-983264d63ce8","Type":"ContainerStarted","Data":"3ca9405b9d6e0659c27d3ed47f7b1c006349b69fa7122a3a37542f8e83f2941a"} Nov 26 14:37:35 crc kubenswrapper[5037]: I1126 14:37:35.944592 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg" event={"ID":"66ef6065-211b-4aa2-b2f5-6386ee020518","Type":"ContainerStarted","Data":"9176d99811f93876a79983017cfcc3d0199f27051142eb5d49a09d4ab78844ca"} Nov 26 14:37:35 crc kubenswrapper[5037]: I1126 14:37:35.950492 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8" event={"ID":"9aa6edf8-6550-4e67-a36c-c1821a4e0778","Type":"ContainerStarted","Data":"a7d34f06ff59597aa4afc0b8f42df10ccc61568c2097cd30313f0abed7729939"} Nov 26 14:37:35 crc kubenswrapper[5037]: I1126 14:37:35.950594 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8" Nov 26 14:37:35 crc kubenswrapper[5037]: I1126 14:37:35.954191 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn" event={"ID":"06c51319-7e28-41b4-be90-8262eb3b7307","Type":"ContainerStarted","Data":"2364a47cb8ee156106ad2f8699cb214dab1bd52147b5ab5acbf58e5f9ea1c962"} Nov 26 14:37:35 crc kubenswrapper[5037]: I1126 14:37:35.955199 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn" Nov 26 14:37:35 crc kubenswrapper[5037]: I1126 14:37:35.958398 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-k67q7" event={"ID":"4fbefccf-1879-4d21-a312-44f95a16545b","Type":"ContainerStarted","Data":"d45d7259c3bbd43e27f5460f5f2509a53f8b8bf7c376fcc1ad1afa0199ddab20"} Nov 26 14:37:35 crc kubenswrapper[5037]: I1126 14:37:35.958775 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-k67q7" Nov 26 14:37:35 crc kubenswrapper[5037]: I1126 14:37:35.973472 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mjnxg" podStartSLOduration=4.211023957 podStartE2EDuration="35.973453803s" podCreationTimestamp="2025-11-26 14:37:00 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.56054399 +0000 UTC m=+1288.357314174" lastFinishedPulling="2025-11-26 14:37:33.322973836 +0000 UTC m=+1320.119744020" observedRunningTime="2025-11-26 14:37:35.969765554 +0000 UTC m=+1322.766535738" watchObservedRunningTime="2025-11-26 14:37:35.973453803 +0000 UTC m=+1322.770223987" Nov 26 14:37:35 crc 
kubenswrapper[5037]: I1126 14:37:35.998998 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn" podStartSLOduration=10.40822241 podStartE2EDuration="36.998974466s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:00.931482424 +0000 UTC m=+1287.728252618" lastFinishedPulling="2025-11-26 14:37:27.52223449 +0000 UTC m=+1314.319004674" observedRunningTime="2025-11-26 14:37:35.990337025 +0000 UTC m=+1322.787107209" watchObservedRunningTime="2025-11-26 14:37:35.998974466 +0000 UTC m=+1322.795744660" Nov 26 14:37:36 crc kubenswrapper[5037]: I1126 14:37:36.035075 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8" podStartSLOduration=10.653606632 podStartE2EDuration="37.034897032s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.141384941 +0000 UTC m=+1287.938155125" lastFinishedPulling="2025-11-26 14:37:27.522675341 +0000 UTC m=+1314.319445525" observedRunningTime="2025-11-26 14:37:36.029228264 +0000 UTC m=+1322.825998448" watchObservedRunningTime="2025-11-26 14:37:36.034897032 +0000 UTC m=+1322.831667226" Nov 26 14:37:36 crc kubenswrapper[5037]: I1126 14:37:36.072141 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-955677c94-k67q7" podStartSLOduration=10.569084989 podStartE2EDuration="37.07212103s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:00.212191743 +0000 UTC m=+1287.008961927" lastFinishedPulling="2025-11-26 14:37:26.715227784 +0000 UTC m=+1313.511997968" observedRunningTime="2025-11-26 14:37:36.070163913 +0000 UTC m=+1322.866934107" watchObservedRunningTime="2025-11-26 14:37:36.07212103 +0000 UTC m=+1322.868891214" Nov 26 14:37:36 crc kubenswrapper[5037]: E1126 14:37:36.171560 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p" podUID="4acfd23c-4a99-4705-9312-fa6e816d7004" Nov 26 14:37:36 crc kubenswrapper[5037]: I1126 14:37:36.977558 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx" event={"ID":"4d00a1ec-3ee8-4166-b497-e96629f2e92a","Type":"ContainerStarted","Data":"05d483620a5b6ac390a7b2fd7e8088aec1f24e3495d58a028a7a5c1072ea98d8"} Nov 26 14:37:36 crc kubenswrapper[5037]: I1126 14:37:36.977648 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx" Nov 26 14:37:36 crc kubenswrapper[5037]: I1126 14:37:36.978903 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p" event={"ID":"4acfd23c-4a99-4705-9312-fa6e816d7004","Type":"ContainerStarted","Data":"bf3cadc9734f8d6c7bad46f65e1fb557d0116387e45784591c596fe443c04615"} Nov 26 14:37:36 crc kubenswrapper[5037]: I1126 14:37:36.996945 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx" podStartSLOduration=12.075742153 podStartE2EDuration="37.99692591s" 
podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:00.794144209 +0000 UTC m=+1287.590914383" lastFinishedPulling="2025-11-26 14:37:26.715327956 +0000 UTC m=+1313.512098140" observedRunningTime="2025-11-26 14:37:36.990999986 +0000 UTC m=+1323.787770180" watchObservedRunningTime="2025-11-26 14:37:36.99692591 +0000 UTC m=+1323.793696094" Nov 26 14:37:37 crc kubenswrapper[5037]: E1126 14:37:37.229342 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-927td" podUID="aa8c9234-d8b0-4975-b4c4-83496196179f" Nov 26 14:37:37 crc kubenswrapper[5037]: I1126 14:37:37.988742 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-927td" event={"ID":"aa8c9234-d8b0-4975-b4c4-83496196179f","Type":"ContainerStarted","Data":"ae3b150b54bed7d2ee11cf34ba1c39b94dfc82dcd0c8d6692ade5f3d535a6f7f"} Nov 26 14:37:38 crc kubenswrapper[5037]: E1126 14:37:38.075126 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x" podUID="46f325c2-aa51-4684-aec4-0c31eb822e6d" Nov 26 14:37:38 crc kubenswrapper[5037]: E1126 14:37:38.083160 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b" podUID="3982528b-3a86-43af-a0af-2f0ddd71e349" Nov 26 14:37:38 crc kubenswrapper[5037]: E1126 14:37:38.460322 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5" podUID="8da78b02-ca91-4fca-8710-875bfdd6e6a9" Nov 26 14:37:38 crc kubenswrapper[5037]: E1126 14:37:38.462938 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz" podUID="8e942820-209d-40b6-bd79-1836b7af00bb" Nov 26 14:37:38 crc kubenswrapper[5037]: E1126 14:37:38.595088 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65" podUID="dad0150e-fc25-4245-ad22-e940fadd107a" Nov 26 14:37:38 crc kubenswrapper[5037]: I1126 14:37:38.997398 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65" event={"ID":"dad0150e-fc25-4245-ad22-e940fadd107a","Type":"ContainerStarted","Data":"c8dc27c774f3badffcdf0acd0e61f9115108d25eb2f685be595c29321d31462a"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.000322 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5" event={"ID":"8da78b02-ca91-4fca-8710-875bfdd6e6a9","Type":"ContainerStarted","Data":"abaedc2b55a8db34ea110456614df9169b74ef8eaf8bc78e6a61620f5d2e8142"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.003805 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x" event={"ID":"46f325c2-aa51-4684-aec4-0c31eb822e6d","Type":"ContainerStarted","Data":"c9926f898212893e7839c78934cf2b0478006210ccbe5c0bb194a2e9c7c1392f"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.006382 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" event={"ID":"98bbf7c3-bf20-4131-8df2-55af39d6c756","Type":"ContainerStarted","Data":"5779bc2c7cceeefa3239dcb9ab9c808142cfdad4346b316ca6493fedd7d9ff62"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.006414 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" event={"ID":"98bbf7c3-bf20-4131-8df2-55af39d6c756","Type":"ContainerStarted","Data":"40b4a6d2210ab9127327c785b8311d3fd846bbf3410e1526b5884cde4457b671"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.008882 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" event={"ID":"63e61192-9513-41ce-a7f9-983264d63ce8","Type":"ContainerStarted","Data":"3008dc7080720b902701d43101b09b330e5a1ec8920a87ae7f23456a9cbe88a5"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.009220 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.010848 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b" event={"ID":"3982528b-3a86-43af-a0af-2f0ddd71e349","Type":"ContainerStarted","Data":"45a2ae8905f293f9f5d60aef688f8965948c864619b6dd1ee2fffe4324415dcd"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.010919 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.012425 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" event={"ID":"abb38dd1-fa1b-4056-85f7-2ebbe18977b9","Type":"ContainerStarted","Data":"ce6876e93b708e4746f80018c9aaecb341d77e7aa9fcf7ba61f0b7ea6a7aee7c"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.012643 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.015135 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.016744 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf" event={"ID":"668635d7-22b8-4fa0-8762-4b3c802cf9cb","Type":"ContainerStarted","Data":"85dc433ad0bacda6f0b05f329544260ca111ccd6dad8b8bb550a2a20376224b0"} Nov 26 14:37:39 crc 
kubenswrapper[5037]: I1126 14:37:39.016952 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.018511 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" event={"ID":"7b497253-ea07-43ac-a78f-d2a145344041","Type":"ContainerStarted","Data":"f74963335c45485b6b1f20d60859fa030e160ecb826d1728766a6a79fdd4880a"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.018630 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.019974 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.020300 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd" event={"ID":"2d64e096-3666-4924-b2c3-31584884abb1","Type":"ContainerStarted","Data":"caebbc95eaaa9b240fc10d21d1cf8e56ba83c8ec095b978b1453895764870978"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.020504 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.022317 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" event={"ID":"a5ef500e-85f6-4655-af56-720d8e23d4b0","Type":"ContainerStarted","Data":"facbee3656fd1fe6464ec3afe42d4ed19d25548193eeb2225134f36e4c843521"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.022502 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.022892 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.023924 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.024463 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8" event={"ID":"a84b911c-ef23-4267-bfdb-0b9c9d8b9070","Type":"ContainerStarted","Data":"1da09adfa043e676581bfb91ade3a68e3995308d269fdde6c87c5ac94f9c21df"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.024698 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.024774 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.026666 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.027229 
5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-927td" event={"ID":"aa8c9234-d8b0-4975-b4c4-83496196179f","Type":"ContainerStarted","Data":"8f24196e4ef1d42acec814cb7719236255de22d6ee61e6a38ca09c7f13486749"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.027340 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-927td" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.028718 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" event={"ID":"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c","Type":"ContainerStarted","Data":"1afb83949f4ff69b31bd7154969efab80a23d49515eb1bc7c8fb164e65e2e715"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.028755 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" event={"ID":"7311c1ce-321d-49a7-b616-6b8f3fb2ce8c","Type":"ContainerStarted","Data":"5d54e5a512ed85789570a5905bccc2afd4ede0db8eefc394e976587d91a2417f"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.028840 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.029754 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz" event={"ID":"8e942820-209d-40b6-bd79-1836b7af00bb","Type":"ContainerStarted","Data":"fc2736701b25cba69b9bda2af95d9c295e2e47b6e531efd2007a8ddedeaf79d2"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.031248 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj" event={"ID":"bd47a58c-4525-4ca4-9e18-9971afc83d7a","Type":"ContainerStarted","Data":"07863af5df206636630eedc30d78d2c70b88380c65ebb3ae2e5ad0412d5cd320"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.031280 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj" event={"ID":"bd47a58c-4525-4ca4-9e18-9971afc83d7a","Type":"ContainerStarted","Data":"9c02d09c3f0ebe237a2b8e780152b8294ed670790f6014d006f4e59d69694fae"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.031685 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.034569 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p" event={"ID":"4acfd23c-4a99-4705-9312-fa6e816d7004","Type":"ContainerStarted","Data":"52817e62d6bfa1dfb7e9da4b58c6b24e104f57f50576b1c910ce0ebb1e11e554"} Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.034939 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.105047 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d77b94747-9dzl2" podStartSLOduration=11.175882842 podStartE2EDuration="40.105032457s" 
podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.426372762 +0000 UTC m=+1288.223142946" lastFinishedPulling="2025-11-26 14:37:30.355522377 +0000 UTC m=+1317.152292561" observedRunningTime="2025-11-26 14:37:39.101482251 +0000 UTC m=+1325.898252445" watchObservedRunningTime="2025-11-26 14:37:39.105032457 +0000 UTC m=+1325.901802641" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.133366 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-skfsl" podStartSLOduration=9.414384249 podStartE2EDuration="40.133340727s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.372278291 +0000 UTC m=+1288.169048475" lastFinishedPulling="2025-11-26 14:37:32.091234779 +0000 UTC m=+1318.888004953" observedRunningTime="2025-11-26 14:37:39.126988693 +0000 UTC m=+1325.923758887" watchObservedRunningTime="2025-11-26 14:37:39.133340727 +0000 UTC m=+1325.930110931" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.167654 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" podStartSLOduration=34.496455998 podStartE2EDuration="40.167640434s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:32.111144334 +0000 UTC m=+1318.907914518" lastFinishedPulling="2025-11-26 14:37:37.78232877 +0000 UTC m=+1324.579098954" observedRunningTime="2025-11-26 14:37:39.1629958 +0000 UTC m=+1325.959765994" watchObservedRunningTime="2025-11-26 14:37:39.167640434 +0000 UTC m=+1325.964410618" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.185512 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-fcz7d" podStartSLOduration=11.209992033 podStartE2EDuration="40.1854948s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.379696052 +0000 UTC m=+1288.176466246" lastFinishedPulling="2025-11-26 14:37:30.355198829 +0000 UTC m=+1317.151969013" observedRunningTime="2025-11-26 14:37:39.182354333 +0000 UTC m=+1325.979124537" watchObservedRunningTime="2025-11-26 14:37:39.1854948 +0000 UTC m=+1325.982264984" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.206625 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-q4qd8" podStartSLOduration=14.054825792 podStartE2EDuration="40.206608045s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.371487202 +0000 UTC m=+1288.168257386" lastFinishedPulling="2025-11-26 14:37:27.523269445 +0000 UTC m=+1314.320039639" observedRunningTime="2025-11-26 14:37:39.20230416 +0000 UTC m=+1325.999074344" watchObservedRunningTime="2025-11-26 14:37:39.206608045 +0000 UTC m=+1326.003378219" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.323398 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p" podStartSLOduration=3.27112574 podStartE2EDuration="40.323381373s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.122060709 +0000 UTC m=+1287.918830893" lastFinishedPulling="2025-11-26 14:37:38.174316342 +0000 UTC m=+1324.971086526" observedRunningTime="2025-11-26 
14:37:39.291026414 +0000 UTC m=+1326.087796608" watchObservedRunningTime="2025-11-26 14:37:39.323381373 +0000 UTC m=+1326.120151557" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.323612 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-7szzf" podStartSLOduration=13.77369685 podStartE2EDuration="40.323607759s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:00.972821854 +0000 UTC m=+1287.769592038" lastFinishedPulling="2025-11-26 14:37:27.522732743 +0000 UTC m=+1314.319502947" observedRunningTime="2025-11-26 14:37:39.321657371 +0000 UTC m=+1326.118427585" watchObservedRunningTime="2025-11-26 14:37:39.323607759 +0000 UTC m=+1326.120377943" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.359700 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-wrk4n" podStartSLOduration=11.566549096 podStartE2EDuration="40.359686309s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.561833161 +0000 UTC m=+1288.358603345" lastFinishedPulling="2025-11-26 14:37:30.354970374 +0000 UTC m=+1317.151740558" observedRunningTime="2025-11-26 14:37:39.358393887 +0000 UTC m=+1326.155164071" watchObservedRunningTime="2025-11-26 14:37:39.359686309 +0000 UTC m=+1326.156456493" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.405424 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" podStartSLOduration=34.280683396 podStartE2EDuration="40.405406515s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:31.63502284 +0000 UTC m=+1318.431793024" lastFinishedPulling="2025-11-26 14:37:37.759745959 +0000 UTC m=+1324.556516143" observedRunningTime="2025-11-26 14:37:39.398624439 +0000 UTC m=+1326.195394623" watchObservedRunningTime="2025-11-26 14:37:39.405406515 +0000 UTC m=+1326.202176699" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.427457 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj" podStartSLOduration=3.6323026499999997 podStartE2EDuration="40.427438501s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.393248704 +0000 UTC m=+1288.190018888" lastFinishedPulling="2025-11-26 14:37:38.188384555 +0000 UTC m=+1324.985154739" observedRunningTime="2025-11-26 14:37:39.418495364 +0000 UTC m=+1326.215265558" watchObservedRunningTime="2025-11-26 14:37:39.427438501 +0000 UTC m=+1326.224208675" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.442566 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-927td" podStartSLOduration=2.59022037 podStartE2EDuration="40.44254657s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:00.826682234 +0000 UTC m=+1287.623452418" lastFinishedPulling="2025-11-26 14:37:38.679008434 +0000 UTC m=+1325.475778618" observedRunningTime="2025-11-26 14:37:39.441344731 +0000 UTC m=+1326.238114925" watchObservedRunningTime="2025-11-26 14:37:39.44254657 +0000 UTC m=+1326.239316754" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.479663 5037 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-8hkwd" podStartSLOduration=14.087594454 podStartE2EDuration="40.479644426s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.132586977 +0000 UTC m=+1287.929357161" lastFinishedPulling="2025-11-26 14:37:27.524636939 +0000 UTC m=+1314.321407133" observedRunningTime="2025-11-26 14:37:39.475580956 +0000 UTC m=+1326.272351140" watchObservedRunningTime="2025-11-26 14:37:39.479644426 +0000 UTC m=+1326.276414610" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.499368 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ndntx" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.531826 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-k67q7" Nov 26 14:37:39 crc kubenswrapper[5037]: I1126 14:37:39.918884 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-s25dn" Nov 26 14:37:40 crc kubenswrapper[5037]: I1126 14:37:40.058789 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:37:40 crc kubenswrapper[5037]: I1126 14:37:40.344539 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-hn8b8" Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.064013 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5" event={"ID":"8da78b02-ca91-4fca-8710-875bfdd6e6a9","Type":"ContainerStarted","Data":"4281543c16d129515079e7c02add4cb229cdd98fa88621e9a1e7ebc15efc42d0"} Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.064537 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5" Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.067124 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x" event={"ID":"46f325c2-aa51-4684-aec4-0c31eb822e6d","Type":"ContainerStarted","Data":"876a05f92835f51d1dd693b104a1a4906e18a95efbf354d6db47508f09fad957"} Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.067256 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x" Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.069117 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65" event={"ID":"dad0150e-fc25-4245-ad22-e940fadd107a","Type":"ContainerStarted","Data":"0f527abdb35610cdf1b0bcc3ec0707b0314b47a545e4937e37b766a960be6b47"} Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.069227 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65" Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.070939 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b" 
event={"ID":"3982528b-3a86-43af-a0af-2f0ddd71e349","Type":"ContainerStarted","Data":"ac2b323205acdce1dc547b3f221223b89d287c08593f92e1d3c03beb3619b8d4"} Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.071421 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b" Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.073070 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz" event={"ID":"8e942820-209d-40b6-bd79-1836b7af00bb","Type":"ContainerStarted","Data":"2fc75dcabf6de78c6d74718f368a0822323c8d4bc879f12a6be58662999f594b"} Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.085072 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5" podStartSLOduration=3.394113769 podStartE2EDuration="42.085053148s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.123715979 +0000 UTC m=+1287.920486163" lastFinishedPulling="2025-11-26 14:37:39.814655368 +0000 UTC m=+1326.611425542" observedRunningTime="2025-11-26 14:37:41.079225106 +0000 UTC m=+1327.875995310" watchObservedRunningTime="2025-11-26 14:37:41.085053148 +0000 UTC m=+1327.881823332" Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.099810 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz" podStartSLOduration=3.152805813 podStartE2EDuration="42.099791868s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.124774195 +0000 UTC m=+1287.921544379" lastFinishedPulling="2025-11-26 14:37:40.07176025 +0000 UTC m=+1326.868530434" observedRunningTime="2025-11-26 14:37:41.094316774 +0000 UTC m=+1327.891086968" watchObservedRunningTime="2025-11-26 14:37:41.099791868 +0000 UTC m=+1327.896562062" Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.110078 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x" podStartSLOduration=3.89563296 podStartE2EDuration="42.110057758s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.600711321 +0000 UTC m=+1288.397481505" lastFinishedPulling="2025-11-26 14:37:39.815136119 +0000 UTC m=+1326.611906303" observedRunningTime="2025-11-26 14:37:41.109594827 +0000 UTC m=+1327.906365011" watchObservedRunningTime="2025-11-26 14:37:41.110057758 +0000 UTC m=+1327.906827942" Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.151298 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65" podStartSLOduration=3.309889221 podStartE2EDuration="42.151255933s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:00.97183086 +0000 UTC m=+1287.768601054" lastFinishedPulling="2025-11-26 14:37:39.813197582 +0000 UTC m=+1326.609967766" observedRunningTime="2025-11-26 14:37:41.150771291 +0000 UTC m=+1327.947541475" watchObservedRunningTime="2025-11-26 14:37:41.151255933 +0000 UTC m=+1327.948026117" Nov 26 14:37:41 crc kubenswrapper[5037]: I1126 14:37:41.171077 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b" podStartSLOduration=3.350823798 podStartE2EDuration="42.171051707s" podCreationTimestamp="2025-11-26 14:36:59 +0000 UTC" firstStartedPulling="2025-11-26 14:37:01.22647945 +0000 UTC m=+1288.023249634" lastFinishedPulling="2025-11-26 14:37:40.046707359 +0000 UTC m=+1326.843477543" observedRunningTime="2025-11-26 14:37:41.167044519 +0000 UTC m=+1327.963814703" watchObservedRunningTime="2025-11-26 14:37:41.171051707 +0000 UTC m=+1327.967821891" Nov 26 14:37:42 crc kubenswrapper[5037]: I1126 14:37:42.079317 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz" Nov 26 14:37:45 crc kubenswrapper[5037]: I1126 14:37:45.316628 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-vdw9h" Nov 26 14:37:45 crc kubenswrapper[5037]: I1126 14:37:45.919772 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8xwf2x" Nov 26 14:37:46 crc kubenswrapper[5037]: I1126 14:37:46.268955 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-7qgq6" Nov 26 14:37:49 crc kubenswrapper[5037]: I1126 14:37:49.744673 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-927td" Nov 26 14:37:49 crc kubenswrapper[5037]: I1126 14:37:49.782952 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7qg65" Nov 26 14:37:49 crc kubenswrapper[5037]: I1126 14:37:49.950977 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-bgknz" Nov 26 14:37:50 crc kubenswrapper[5037]: I1126 14:37:50.051666 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-q6n7b" Nov 26 14:37:50 crc kubenswrapper[5037]: I1126 14:37:50.100364 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-z7cq5" Nov 26 14:37:50 crc kubenswrapper[5037]: I1126 14:37:50.240854 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-shv5p" Nov 26 14:37:50 crc kubenswrapper[5037]: I1126 14:37:50.456788 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-f5hkj" Nov 26 14:37:50 crc kubenswrapper[5037]: I1126 14:37:50.650385 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-tsv6x" Nov 26 14:38:06 crc kubenswrapper[5037]: I1126 14:38:06.911456 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-qsvms"] Nov 26 14:38:06 crc kubenswrapper[5037]: I1126 14:38:06.915642 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-qsvms" Nov 26 14:38:06 crc kubenswrapper[5037]: I1126 14:38:06.920751 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 26 14:38:06 crc kubenswrapper[5037]: I1126 14:38:06.920976 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 26 14:38:06 crc kubenswrapper[5037]: I1126 14:38:06.924150 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 26 14:38:06 crc kubenswrapper[5037]: I1126 14:38:06.924277 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-qsvms"] Nov 26 14:38:06 crc kubenswrapper[5037]: I1126 14:38:06.929451 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-gxxc4" Nov 26 14:38:06 crc kubenswrapper[5037]: I1126 14:38:06.981331 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6584b49599-56g66"] Nov 26 14:38:06 crc kubenswrapper[5037]: I1126 14:38:06.992741 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-56g66" Nov 26 14:38:06 crc kubenswrapper[5037]: I1126 14:38:06.996648 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.008091 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-56g66"] Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.038456 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a594ed83-eda8-4667-825e-ace74544882c-config\") pod \"dnsmasq-dns-7bdd77c89-qsvms\" (UID: \"a594ed83-eda8-4667-825e-ace74544882c\") " pod="openstack/dnsmasq-dns-7bdd77c89-qsvms" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.038526 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhmx5\" (UniqueName: \"kubernetes.io/projected/a594ed83-eda8-4667-825e-ace74544882c-kube-api-access-mhmx5\") pod \"dnsmasq-dns-7bdd77c89-qsvms\" (UID: \"a594ed83-eda8-4667-825e-ace74544882c\") " pod="openstack/dnsmasq-dns-7bdd77c89-qsvms" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.140193 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/516a4181-8e5b-4cd1-8a64-e51748a3560d-dns-svc\") pod \"dnsmasq-dns-6584b49599-56g66\" (UID: \"516a4181-8e5b-4cd1-8a64-e51748a3560d\") " pod="openstack/dnsmasq-dns-6584b49599-56g66" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.140498 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/516a4181-8e5b-4cd1-8a64-e51748a3560d-config\") pod \"dnsmasq-dns-6584b49599-56g66\" (UID: \"516a4181-8e5b-4cd1-8a64-e51748a3560d\") " pod="openstack/dnsmasq-dns-6584b49599-56g66" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.140658 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a594ed83-eda8-4667-825e-ace74544882c-config\") pod \"dnsmasq-dns-7bdd77c89-qsvms\" (UID: \"a594ed83-eda8-4667-825e-ace74544882c\") " pod="openstack/dnsmasq-dns-7bdd77c89-qsvms" Nov 26 14:38:07 crc 
kubenswrapper[5037]: I1126 14:38:07.140774 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhmx5\" (UniqueName: \"kubernetes.io/projected/a594ed83-eda8-4667-825e-ace74544882c-kube-api-access-mhmx5\") pod \"dnsmasq-dns-7bdd77c89-qsvms\" (UID: \"a594ed83-eda8-4667-825e-ace74544882c\") " pod="openstack/dnsmasq-dns-7bdd77c89-qsvms" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.140948 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs6x6\" (UniqueName: \"kubernetes.io/projected/516a4181-8e5b-4cd1-8a64-e51748a3560d-kube-api-access-bs6x6\") pod \"dnsmasq-dns-6584b49599-56g66\" (UID: \"516a4181-8e5b-4cd1-8a64-e51748a3560d\") " pod="openstack/dnsmasq-dns-6584b49599-56g66" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.141880 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a594ed83-eda8-4667-825e-ace74544882c-config\") pod \"dnsmasq-dns-7bdd77c89-qsvms\" (UID: \"a594ed83-eda8-4667-825e-ace74544882c\") " pod="openstack/dnsmasq-dns-7bdd77c89-qsvms" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.164598 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhmx5\" (UniqueName: \"kubernetes.io/projected/a594ed83-eda8-4667-825e-ace74544882c-kube-api-access-mhmx5\") pod \"dnsmasq-dns-7bdd77c89-qsvms\" (UID: \"a594ed83-eda8-4667-825e-ace74544882c\") " pod="openstack/dnsmasq-dns-7bdd77c89-qsvms" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.242206 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs6x6\" (UniqueName: \"kubernetes.io/projected/516a4181-8e5b-4cd1-8a64-e51748a3560d-kube-api-access-bs6x6\") pod \"dnsmasq-dns-6584b49599-56g66\" (UID: \"516a4181-8e5b-4cd1-8a64-e51748a3560d\") " pod="openstack/dnsmasq-dns-6584b49599-56g66" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.242308 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/516a4181-8e5b-4cd1-8a64-e51748a3560d-dns-svc\") pod \"dnsmasq-dns-6584b49599-56g66\" (UID: \"516a4181-8e5b-4cd1-8a64-e51748a3560d\") " pod="openstack/dnsmasq-dns-6584b49599-56g66" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.243396 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/516a4181-8e5b-4cd1-8a64-e51748a3560d-dns-svc\") pod \"dnsmasq-dns-6584b49599-56g66\" (UID: \"516a4181-8e5b-4cd1-8a64-e51748a3560d\") " pod="openstack/dnsmasq-dns-6584b49599-56g66" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.243460 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/516a4181-8e5b-4cd1-8a64-e51748a3560d-config\") pod \"dnsmasq-dns-6584b49599-56g66\" (UID: \"516a4181-8e5b-4cd1-8a64-e51748a3560d\") " pod="openstack/dnsmasq-dns-6584b49599-56g66" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.243624 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/516a4181-8e5b-4cd1-8a64-e51748a3560d-config\") pod \"dnsmasq-dns-6584b49599-56g66\" (UID: \"516a4181-8e5b-4cd1-8a64-e51748a3560d\") " pod="openstack/dnsmasq-dns-6584b49599-56g66" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.244913 5037 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-qsvms" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.273170 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs6x6\" (UniqueName: \"kubernetes.io/projected/516a4181-8e5b-4cd1-8a64-e51748a3560d-kube-api-access-bs6x6\") pod \"dnsmasq-dns-6584b49599-56g66\" (UID: \"516a4181-8e5b-4cd1-8a64-e51748a3560d\") " pod="openstack/dnsmasq-dns-6584b49599-56g66" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.323958 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-56g66" Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.517210 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-qsvms"] Nov 26 14:38:07 crc kubenswrapper[5037]: W1126 14:38:07.535798 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda594ed83_eda8_4667_825e_ace74544882c.slice/crio-ab2fb9e9317f50c3a655aed4cf68fe157b15e3693abd0eff26ece01071b28f0b WatchSource:0}: Error finding container ab2fb9e9317f50c3a655aed4cf68fe157b15e3693abd0eff26ece01071b28f0b: Status 404 returned error can't find the container with id ab2fb9e9317f50c3a655aed4cf68fe157b15e3693abd0eff26ece01071b28f0b Nov 26 14:38:07 crc kubenswrapper[5037]: I1126 14:38:07.812752 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-56g66"] Nov 26 14:38:07 crc kubenswrapper[5037]: W1126 14:38:07.812996 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod516a4181_8e5b_4cd1_8a64_e51748a3560d.slice/crio-43d0982df41696ad76091af64635bd9aa148876dde5f3afaac25677dd5fb1e69 WatchSource:0}: Error finding container 43d0982df41696ad76091af64635bd9aa148876dde5f3afaac25677dd5fb1e69: Status 404 returned error can't find the container with id 43d0982df41696ad76091af64635bd9aa148876dde5f3afaac25677dd5fb1e69 Nov 26 14:38:08 crc kubenswrapper[5037]: I1126 14:38:08.293656 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6584b49599-56g66" event={"ID":"516a4181-8e5b-4cd1-8a64-e51748a3560d","Type":"ContainerStarted","Data":"43d0982df41696ad76091af64635bd9aa148876dde5f3afaac25677dd5fb1e69"} Nov 26 14:38:08 crc kubenswrapper[5037]: I1126 14:38:08.296653 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdd77c89-qsvms" event={"ID":"a594ed83-eda8-4667-825e-ace74544882c","Type":"ContainerStarted","Data":"ab2fb9e9317f50c3a655aed4cf68fe157b15e3693abd0eff26ece01071b28f0b"} Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.144596 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-56g66"] Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.174816 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-d58hm"] Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.176674 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.184045 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-d58hm"] Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.278018 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlmdc\" (UniqueName: \"kubernetes.io/projected/0346fe68-c180-481b-879e-59ee91287a7e-kube-api-access-zlmdc\") pod \"dnsmasq-dns-7c6d9948dc-d58hm\" (UID: \"0346fe68-c180-481b-879e-59ee91287a7e\") " pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.278101 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0346fe68-c180-481b-879e-59ee91287a7e-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-d58hm\" (UID: \"0346fe68-c180-481b-879e-59ee91287a7e\") " pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.278607 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0346fe68-c180-481b-879e-59ee91287a7e-config\") pod \"dnsmasq-dns-7c6d9948dc-d58hm\" (UID: \"0346fe68-c180-481b-879e-59ee91287a7e\") " pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.380494 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0346fe68-c180-481b-879e-59ee91287a7e-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-d58hm\" (UID: \"0346fe68-c180-481b-879e-59ee91287a7e\") " pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.380617 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0346fe68-c180-481b-879e-59ee91287a7e-config\") pod \"dnsmasq-dns-7c6d9948dc-d58hm\" (UID: \"0346fe68-c180-481b-879e-59ee91287a7e\") " pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.380673 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlmdc\" (UniqueName: \"kubernetes.io/projected/0346fe68-c180-481b-879e-59ee91287a7e-kube-api-access-zlmdc\") pod \"dnsmasq-dns-7c6d9948dc-d58hm\" (UID: \"0346fe68-c180-481b-879e-59ee91287a7e\") " pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.381754 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0346fe68-c180-481b-879e-59ee91287a7e-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-d58hm\" (UID: \"0346fe68-c180-481b-879e-59ee91287a7e\") " pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.383945 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0346fe68-c180-481b-879e-59ee91287a7e-config\") pod \"dnsmasq-dns-7c6d9948dc-d58hm\" (UID: \"0346fe68-c180-481b-879e-59ee91287a7e\") " pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.419948 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zlmdc\" (UniqueName: 
\"kubernetes.io/projected/0346fe68-c180-481b-879e-59ee91287a7e-kube-api-access-zlmdc\") pod \"dnsmasq-dns-7c6d9948dc-d58hm\" (UID: \"0346fe68-c180-481b-879e-59ee91287a7e\") " pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.513858 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.658247 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-qsvms"] Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.698794 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-gc7pm"] Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.700576 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.751706 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-gc7pm"] Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.901899 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-dns-svc\") pod \"dnsmasq-dns-6486446b9f-gc7pm\" (UID: \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\") " pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.902326 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ns68w\" (UniqueName: \"kubernetes.io/projected/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-kube-api-access-ns68w\") pod \"dnsmasq-dns-6486446b9f-gc7pm\" (UID: \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\") " pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:38:09 crc kubenswrapper[5037]: I1126 14:38:09.902363 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-config\") pod \"dnsmasq-dns-6486446b9f-gc7pm\" (UID: \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\") " pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.003416 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-dns-svc\") pod \"dnsmasq-dns-6486446b9f-gc7pm\" (UID: \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\") " pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.003508 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ns68w\" (UniqueName: \"kubernetes.io/projected/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-kube-api-access-ns68w\") pod \"dnsmasq-dns-6486446b9f-gc7pm\" (UID: \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\") " pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.004107 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-config\") pod \"dnsmasq-dns-6486446b9f-gc7pm\" (UID: \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\") " pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.004725 5037 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-dns-svc\") pod \"dnsmasq-dns-6486446b9f-gc7pm\" (UID: \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\") " pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.004926 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-config\") pod \"dnsmasq-dns-6486446b9f-gc7pm\" (UID: \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\") " pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.042618 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ns68w\" (UniqueName: \"kubernetes.io/projected/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-kube-api-access-ns68w\") pod \"dnsmasq-dns-6486446b9f-gc7pm\" (UID: \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\") " pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.230160 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-d58hm"] Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.323754 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.363396 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" event={"ID":"0346fe68-c180-481b-879e-59ee91287a7e","Type":"ContainerStarted","Data":"62ef05b460955a36a8707c495ec79b4a4609871d7bffa021944164e9b30b8dda"} Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.422863 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.425800 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.429509 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-mzss7" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.430612 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.430859 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.431010 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.431178 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.431425 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.431690 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.466317 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.618251 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.618869 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.618931 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.618962 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.618994 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.619017 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.619036 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ba78b94a-32d0-4377-ac41-ffd036b241bf-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.619063 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.619102 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.619126 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ba78b94a-32d0-4377-ac41-ffd036b241bf-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.619158 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n275f\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-kube-api-access-n275f\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.710855 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-gc7pm"] Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.721507 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.721582 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.721647 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.721680 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.721708 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.721732 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.721762 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ba78b94a-32d0-4377-ac41-ffd036b241bf-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.721799 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.721859 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.721899 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ba78b94a-32d0-4377-ac41-ffd036b241bf-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.721944 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n275f\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-kube-api-access-n275f\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.722438 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.722835 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.722993 5037 
operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.723557 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.724436 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.725238 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.732735 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ba78b94a-32d0-4377-ac41-ffd036b241bf-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.733046 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: W1126 14:38:10.733207 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod073bdcf3_16fa_4b27_8a82_709bf0e1bf1c.slice/crio-5a4dcf11e537568bf4494020f248d1d7b9f7c7f4b619cdf8bbc676287241731c WatchSource:0}: Error finding container 5a4dcf11e537568bf4494020f248d1d7b9f7c7f4b619cdf8bbc676287241731c: Status 404 returned error can't find the container with id 5a4dcf11e537568bf4494020f248d1d7b9f7c7f4b619cdf8bbc676287241731c Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.728676 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ba78b94a-32d0-4377-ac41-ffd036b241bf-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.738710 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n275f\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-kube-api-access-n275f\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.754866 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.777074 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") " pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.785835 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.850897 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.853642 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.858693 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.858945 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.859218 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-5wztt" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.859455 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.860274 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.860557 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.860695 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 26 14:38:10 crc kubenswrapper[5037]: I1126 14:38:10.887818 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.028663 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7f05291f-1331-411b-9971-c71218d11a35-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.030763 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.030873 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7f05291f-1331-411b-9971-c71218d11a35-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.030906 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.031127 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kksth\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-kube-api-access-kksth\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.031388 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.031522 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.031596 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.031614 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.031677 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.031741 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.134514 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.134627 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.134660 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.134707 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.134739 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.134783 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7f05291f-1331-411b-9971-c71218d11a35-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.134804 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.134852 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7f05291f-1331-411b-9971-c71218d11a35-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.134873 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.134929 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kksth\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-kube-api-access-kksth\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.134984 5037 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.137013 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.137695 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.141149 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.141496 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.142695 5037 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.145326 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.148653 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7f05291f-1331-411b-9971-c71218d11a35-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.161010 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.170469 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.170468 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7f05291f-1331-411b-9971-c71218d11a35-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.171748 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kksth\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-kube-api-access-kksth\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.202171 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.212876 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.378094 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ba78b94a-32d0-4377-ac41-ffd036b241bf","Type":"ContainerStarted","Data":"ea19be20ec1a9986458ca26a10483190304f96a7d56d35f1e5efc302d4be13ee"} Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.383664 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" event={"ID":"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c","Type":"ContainerStarted","Data":"5a4dcf11e537568bf4494020f248d1d7b9f7c7f4b619cdf8bbc676287241731c"} Nov 26 14:38:11 crc kubenswrapper[5037]: I1126 14:38:11.502158 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.114931 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 14:38:12 crc kubenswrapper[5037]: W1126 14:38:12.141935 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7f05291f_1331_411b_9971_c71218d11a35.slice/crio-ab253e4e7b760be53f981740689112a54ee580da41e5e706ef1538a995af2a47 WatchSource:0}: Error finding container ab253e4e7b760be53f981740689112a54ee580da41e5e706ef1538a995af2a47: Status 404 returned error can't find the container with id ab253e4e7b760be53f981740689112a54ee580da41e5e706ef1538a995af2a47 Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.172553 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.175386 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.180855 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.181323 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.184160 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.184896 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-w9s2c" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.186353 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.186741 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.268809 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-operator-scripts\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.268873 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-config-data-generated\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.268892 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-config-data-default\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.268923 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbvsm\" (UniqueName: \"kubernetes.io/projected/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-kube-api-access-bbvsm\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.268941 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.268961 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-kolla-config\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.268985 5037 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.269044 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.370977 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-operator-scripts\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.371495 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-config-data-generated\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.371520 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-config-data-default\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.371553 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbvsm\" (UniqueName: \"kubernetes.io/projected/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-kube-api-access-bbvsm\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.371577 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.371608 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-kolla-config\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.371633 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.371678 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-combined-ca-bundle\") pod \"openstack-galera-0\" 
(UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.372459 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-config-data-generated\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.373174 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-config-data-default\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.373791 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-kolla-config\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.374201 5037 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.397618 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.399702 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-operator-scripts\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.420748 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.421443 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.426930 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbvsm\" (UniqueName: \"kubernetes.io/projected/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-kube-api-access-bbvsm\") pod \"openstack-galera-0\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " pod="openstack/openstack-galera-0" Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.430998 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"7f05291f-1331-411b-9971-c71218d11a35","Type":"ContainerStarted","Data":"ab253e4e7b760be53f981740689112a54ee580da41e5e706ef1538a995af2a47"} Nov 26 14:38:12 crc kubenswrapper[5037]: I1126 14:38:12.553199 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.174161 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.458925 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"bf45bdb2-c880-43f7-b30a-4d1b36363f7d","Type":"ContainerStarted","Data":"0e0c43c14d7c40f72d888f92a2463eb3fd5e8a79deb60cac803d58125d95319a"} Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.675233 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.676809 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.679181 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-765dp" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.680968 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.681174 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.681349 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.691495 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.693016 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.703793 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.729568 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.729719 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-xlwjs" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.730043 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.741831 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.805234 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/300dce8f-4337-4707-8075-f32b93f03e4f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.805395 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.805806 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.806007 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.806202 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300dce8f-4337-4707-8075-f32b93f03e4f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.806548 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bdd4849b-e92e-473d-88d0-74c060c04eb7-kolla-config\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.806927 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/300dce8f-4337-4707-8075-f32b93f03e4f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: 
\"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.807034 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdd4849b-e92e-473d-88d0-74c060c04eb7-combined-ca-bundle\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.807247 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmtdx\" (UniqueName: \"kubernetes.io/projected/bdd4849b-e92e-473d-88d0-74c060c04eb7-kube-api-access-xmtdx\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.807328 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bdd4849b-e92e-473d-88d0-74c060c04eb7-config-data\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.807379 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.807473 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6gnn\" (UniqueName: \"kubernetes.io/projected/300dce8f-4337-4707-8075-f32b93f03e4f-kube-api-access-m6gnn\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.807504 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdd4849b-e92e-473d-88d0-74c060c04eb7-memcached-tls-certs\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.909389 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmtdx\" (UniqueName: \"kubernetes.io/projected/bdd4849b-e92e-473d-88d0-74c060c04eb7-kube-api-access-xmtdx\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.909502 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bdd4849b-e92e-473d-88d0-74c060c04eb7-config-data\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.909531 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc 
kubenswrapper[5037]: I1126 14:38:13.911028 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.911108 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6gnn\" (UniqueName: \"kubernetes.io/projected/300dce8f-4337-4707-8075-f32b93f03e4f-kube-api-access-m6gnn\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.911137 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdd4849b-e92e-473d-88d0-74c060c04eb7-memcached-tls-certs\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.911027 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bdd4849b-e92e-473d-88d0-74c060c04eb7-config-data\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.911194 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/300dce8f-4337-4707-8075-f32b93f03e4f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.911243 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.911262 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.911530 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/300dce8f-4337-4707-8075-f32b93f03e4f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.913588 5037 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.911278 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.917686 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300dce8f-4337-4707-8075-f32b93f03e4f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.917898 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bdd4849b-e92e-473d-88d0-74c060c04eb7-kolla-config\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.918572 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.919596 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bdd4849b-e92e-473d-88d0-74c060c04eb7-kolla-config\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.919732 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.920343 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/300dce8f-4337-4707-8075-f32b93f03e4f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.921000 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdd4849b-e92e-473d-88d0-74c060c04eb7-combined-ca-bundle\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.925461 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300dce8f-4337-4707-8075-f32b93f03e4f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.926150 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/300dce8f-4337-4707-8075-f32b93f03e4f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc 
kubenswrapper[5037]: I1126 14:38:13.932894 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdd4849b-e92e-473d-88d0-74c060c04eb7-memcached-tls-certs\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.947688 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdd4849b-e92e-473d-88d0-74c060c04eb7-combined-ca-bundle\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.952979 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmtdx\" (UniqueName: \"kubernetes.io/projected/bdd4849b-e92e-473d-88d0-74c060c04eb7-kube-api-access-xmtdx\") pod \"memcached-0\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " pod="openstack/memcached-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.954638 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6gnn\" (UniqueName: \"kubernetes.io/projected/300dce8f-4337-4707-8075-f32b93f03e4f-kube-api-access-m6gnn\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:13 crc kubenswrapper[5037]: I1126 14:38:13.970500 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:14 crc kubenswrapper[5037]: I1126 14:38:14.074172 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 14:38:14 crc kubenswrapper[5037]: I1126 14:38:14.095267 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 26 14:38:14 crc kubenswrapper[5037]: I1126 14:38:14.639240 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 14:38:14 crc kubenswrapper[5037]: I1126 14:38:14.884577 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 14:38:15 crc kubenswrapper[5037]: I1126 14:38:15.447179 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 14:38:15 crc kubenswrapper[5037]: I1126 14:38:15.448540 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 26 14:38:15 crc kubenswrapper[5037]: I1126 14:38:15.452676 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k82xn\" (UniqueName: \"kubernetes.io/projected/a7d2fc57-9486-4084-aabe-96ed92c69f2c-kube-api-access-k82xn\") pod \"kube-state-metrics-0\" (UID: \"a7d2fc57-9486-4084-aabe-96ed92c69f2c\") " pod="openstack/kube-state-metrics-0"
Nov 26 14:38:15 crc kubenswrapper[5037]: I1126 14:38:15.453475 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-sfdcp"
Nov 26 14:38:15 crc kubenswrapper[5037]: I1126 14:38:15.458375 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 26 14:38:15 crc kubenswrapper[5037]: I1126 14:38:15.554717 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k82xn\" (UniqueName: \"kubernetes.io/projected/a7d2fc57-9486-4084-aabe-96ed92c69f2c-kube-api-access-k82xn\") pod \"kube-state-metrics-0\" (UID: \"a7d2fc57-9486-4084-aabe-96ed92c69f2c\") " pod="openstack/kube-state-metrics-0"
Nov 26 14:38:15 crc kubenswrapper[5037]: I1126 14:38:15.616422 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k82xn\" (UniqueName: \"kubernetes.io/projected/a7d2fc57-9486-4084-aabe-96ed92c69f2c-kube-api-access-k82xn\") pod \"kube-state-metrics-0\" (UID: \"a7d2fc57-9486-4084-aabe-96ed92c69f2c\") " pod="openstack/kube-state-metrics-0"
Nov 26 14:38:15 crc kubenswrapper[5037]: I1126 14:38:15.814792 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.079222 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.081279 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.089991 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.090323 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.090341 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.091176 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.100217 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.105205 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-xhdcm"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.167309 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.167363 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-config\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.167399 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.167428 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.167520 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bsk4\" (UniqueName: \"kubernetes.io/projected/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-kube-api-access-6bsk4\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.167559 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.167583 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.167665 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.270138 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bsk4\" (UniqueName: \"kubernetes.io/projected/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-kube-api-access-6bsk4\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.270234 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.270262 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.270318 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.270424 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-config\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.270449 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.270488 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.270518 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.270962 5037 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.271646 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.273108 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-config\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.274603 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.279845 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.280487 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.282169 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.297082 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bsk4\" (UniqueName: \"kubernetes.io/projected/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-kube-api-access-6bsk4\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.313709 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.373247 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ptz2q"]
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.374878 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.378910 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.379892 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.380543 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-447tj"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.391733 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ptz2q"]
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.408906 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.435513 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-264cs"]
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.438337 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.449253 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-264cs"]
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.474467 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-etc-ovs\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.474533 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72s6r\" (UniqueName: \"kubernetes.io/projected/80ce8a9a-aa28-40e4-ac35-c7d379224208-kube-api-access-72s6r\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.474580 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5rjc\" (UniqueName: \"kubernetes.io/projected/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-kube-api-access-s5rjc\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.474650 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-log-ovn\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.474684 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-lib\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.474708 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-run\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.474745 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-scripts\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.474769 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-run-ovn\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.474818 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-combined-ca-bundle\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.474864 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-run\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.474922 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/80ce8a9a-aa28-40e4-ac35-c7d379224208-scripts\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.474957 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-ovn-controller-tls-certs\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.474991 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-log\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.577665 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-log-ovn\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.577733 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-lib\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.577764 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-run\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.577803 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-scripts\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.577839 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-run-ovn\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.577894 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-combined-ca-bundle\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.578599 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-run-ovn\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.578717 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-run\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.578721 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-lib\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.578789 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-run\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.578789 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-log-ovn\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.578842 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/80ce8a9a-aa28-40e4-ac35-c7d379224208-scripts\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.578953 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-run\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.579309 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-ovn-controller-tls-certs\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.579383 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-log\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.579466 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72s6r\" (UniqueName: \"kubernetes.io/projected/80ce8a9a-aa28-40e4-ac35-c7d379224208-kube-api-access-72s6r\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.579496 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-etc-ovs\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.579542 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5rjc\" (UniqueName: \"kubernetes.io/projected/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-kube-api-access-s5rjc\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.581141 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-scripts\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.581230 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-log\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.581374 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-etc-ovs\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.583061 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-ovn-controller-tls-certs\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.586343 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/80ce8a9a-aa28-40e4-ac35-c7d379224208-scripts\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.593335 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-combined-ca-bundle\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.595294 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"300dce8f-4337-4707-8075-f32b93f03e4f","Type":"ContainerStarted","Data":"c21db9721ff60f887a92e254c47928c31b0ebd15e9dd0d260534a5c1184d00c8"}
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.597013 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"bdd4849b-e92e-473d-88d0-74c060c04eb7","Type":"ContainerStarted","Data":"e0cff50483b1be8a51565b59ec847e38294733a3692b2001252edeee0e2dc5bf"}
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.602520 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72s6r\" (UniqueName: \"kubernetes.io/projected/80ce8a9a-aa28-40e4-ac35-c7d379224208-kube-api-access-72s6r\") pod \"ovn-controller-ovs-264cs\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.606559 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5rjc\" (UniqueName: \"kubernetes.io/projected/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-kube-api-access-s5rjc\") pod \"ovn-controller-ptz2q\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") " pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.782835 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ptz2q"
Nov 26 14:38:20 crc kubenswrapper[5037]: I1126 14:38:20.795439 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-264cs"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.317939 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.320719 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.325010 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.325084 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.325704 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.325915 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-s68mt"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.366538 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.449305 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.449398 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-config\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.449631 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.449725 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.449813 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.449849 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.449893 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.449938 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsqhm\" (UniqueName: \"kubernetes.io/projected/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-kube-api-access-xsqhm\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.554852 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.554930 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.554974 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.555000 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.555027 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.555059 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsqhm\" (UniqueName: \"kubernetes.io/projected/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-kube-api-access-xsqhm\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.555177 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.555228 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-config\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.555486 5037 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.555736 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.556307 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-config\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.557275 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.564799 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.564799 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.565412 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.575683 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsqhm\" (UniqueName: \"kubernetes.io/projected/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-kube-api-access-xsqhm\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.584110 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:23 crc kubenswrapper[5037]: I1126 14:38:23.669449 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Nov 26 14:38:33 crc kubenswrapper[5037]: E1126 14:38:33.612627 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba"
Nov 26 14:38:33 crc kubenswrapper[5037]: E1126 14:38:33.613792 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ns68w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-6486446b9f-gc7pm_openstack(073bdcf3-16fa-4b27-8a82-709bf0e1bf1c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 14:38:33 crc kubenswrapper[5037]: E1126 14:38:33.614997 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" podUID="073bdcf3-16fa-4b27-8a82-709bf0e1bf1c"
Nov 26 14:38:33 crc kubenswrapper[5037]: E1126 14:38:33.650126 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba"
Nov 26 14:38:33 crc kubenswrapper[5037]: E1126 14:38:33.650897 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mhmx5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7bdd77c89-qsvms_openstack(a594ed83-eda8-4667-825e-ace74544882c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 14:38:33 crc kubenswrapper[5037]: E1126 14:38:33.652065 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7bdd77c89-qsvms" podUID="a594ed83-eda8-4667-825e-ace74544882c"
Nov 26 14:38:33 crc kubenswrapper[5037]: E1126 14:38:33.741034 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba\\\"\"" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" podUID="073bdcf3-16fa-4b27-8a82-709bf0e1bf1c"
Nov 26 14:38:42 crc kubenswrapper[5037]: E1126 14:38:42.621833 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b"
Nov 26 14:38:42 crc kubenswrapper[5037]: E1126 14:38:42.622840 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n275f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(ba78b94a-32d0-4377-ac41-ffd036b241bf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 14:38:42 crc kubenswrapper[5037]: E1126 14:38:42.624001 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="ba78b94a-32d0-4377-ac41-ffd036b241bf"
Nov 26 14:38:42 crc kubenswrapper[5037]: E1126 14:38:42.812766 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b\\\"\"" pod="openstack/rabbitmq-server-0" podUID="ba78b94a-32d0-4377-ac41-ffd036b241bf"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.601499 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.602020 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bbvsm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(bf45bdb2-c880-43f7-b30a-4d1b36363f7d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.603148 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="bf45bdb2-c880-43f7-b30a-4d1b36363f7d"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.624022 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.624243 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bs6x6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-6584b49599-56g66_openstack(516a4181-8e5b-4cd1-8a64-e51748a3560d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.625586 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-6584b49599-56g66" podUID="516a4181-8e5b-4cd1-8a64-e51748a3560d"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.647137 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.647343 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kksth,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(7f05291f-1331-411b-9971-c71218d11a35): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.649722 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="7f05291f-1331-411b-9971-c71218d11a35"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.646587 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.657077 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m6gnn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(300dce8f-4337-4707-8075-f32b93f03e4f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.658987 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="300dce8f-4337-4707-8075-f32b93f03e4f"
Nov 26 14:38:44 crc kubenswrapper[5037]: I1126 14:38:44.660474 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-qsvms"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.708793 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.709074 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zlmdc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7c6d9948dc-d58hm_openstack(0346fe68-c180-481b-879e-59ee91287a7e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.710257 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" podUID="0346fe68-c180-481b-879e-59ee91287a7e"
Nov 26 14:38:44 crc kubenswrapper[5037]: I1126 14:38:44.787228 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a594ed83-eda8-4667-825e-ace74544882c-config\") pod \"a594ed83-eda8-4667-825e-ace74544882c\" (UID: \"a594ed83-eda8-4667-825e-ace74544882c\")
" Nov 26 14:38:44 crc kubenswrapper[5037]: I1126 14:38:44.787390 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhmx5\" (UniqueName: \"kubernetes.io/projected/a594ed83-eda8-4667-825e-ace74544882c-kube-api-access-mhmx5\") pod \"a594ed83-eda8-4667-825e-ace74544882c\" (UID: \"a594ed83-eda8-4667-825e-ace74544882c\") " Nov 26 14:38:44 crc kubenswrapper[5037]: I1126 14:38:44.787872 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a594ed83-eda8-4667-825e-ace74544882c-config" (OuterVolumeSpecName: "config") pod "a594ed83-eda8-4667-825e-ace74544882c" (UID: "a594ed83-eda8-4667-825e-ace74544882c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:38:44 crc kubenswrapper[5037]: I1126 14:38:44.792768 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a594ed83-eda8-4667-825e-ace74544882c-kube-api-access-mhmx5" (OuterVolumeSpecName: "kube-api-access-mhmx5") pod "a594ed83-eda8-4667-825e-ace74544882c" (UID: "a594ed83-eda8-4667-825e-ace74544882c"). InnerVolumeSpecName "kube-api-access-mhmx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:38:44 crc kubenswrapper[5037]: I1126 14:38:44.856649 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-qsvms" Nov 26 14:38:44 crc kubenswrapper[5037]: I1126 14:38:44.856878 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdd77c89-qsvms" event={"ID":"a594ed83-eda8-4667-825e-ace74544882c","Type":"ContainerDied","Data":"ab2fb9e9317f50c3a655aed4cf68fe157b15e3693abd0eff26ece01071b28f0b"} Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.859586 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce\\\"\"" pod="openstack/openstack-galera-0" podUID="bf45bdb2-c880-43f7-b30a-4d1b36363f7d" Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.859778 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="300dce8f-4337-4707-8075-f32b93f03e4f" Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.859994 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba\\\"\"" pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" podUID="0346fe68-c180-481b-879e-59ee91287a7e" Nov 26 14:38:44 crc kubenswrapper[5037]: E1126 14:38:44.860057 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="7f05291f-1331-411b-9971-c71218d11a35" Nov 26 14:38:44 crc 
kubenswrapper[5037]: I1126 14:38:44.891965 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhmx5\" (UniqueName: \"kubernetes.io/projected/a594ed83-eda8-4667-825e-ace74544882c-kube-api-access-mhmx5\") on node \"crc\" DevicePath \"\"" Nov 26 14:38:44 crc kubenswrapper[5037]: I1126 14:38:44.892008 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a594ed83-eda8-4667-825e-ace74544882c-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.048469 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-qsvms"] Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.055628 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-qsvms"] Nov 26 14:38:45 crc kubenswrapper[5037]: E1126 14:38:45.539173 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached@sha256:36a0fb31978aee0ded2483de311631e64a644d0b0685b5b055f65ede7eb8e8a2" Nov 26 14:38:45 crc kubenswrapper[5037]: E1126 14:38:45.539502 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached@sha256:36a0fb31978aee0ded2483de311631e64a644d0b0685b5b055f65ede7eb8e8a2,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n696h5dbh588h575h55fh564hdfh4h94h558h5d9h578hf5h587h669hf8h54dhbch7fhffh5c9hfch5bhf8hbdh54dh55fh54h5fbh599h57ch85q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xmtdx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 
},Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(bdd4849b-e92e-473d-88d0-74c060c04eb7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 14:38:45 crc kubenswrapper[5037]: E1126 14:38:45.541870 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="bdd4849b-e92e-473d-88d0-74c060c04eb7" Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.624873 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-56g66" Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.711092 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/516a4181-8e5b-4cd1-8a64-e51748a3560d-config\") pod \"516a4181-8e5b-4cd1-8a64-e51748a3560d\" (UID: \"516a4181-8e5b-4cd1-8a64-e51748a3560d\") " Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.712106 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/516a4181-8e5b-4cd1-8a64-e51748a3560d-config" (OuterVolumeSpecName: "config") pod "516a4181-8e5b-4cd1-8a64-e51748a3560d" (UID: "516a4181-8e5b-4cd1-8a64-e51748a3560d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.712231 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bs6x6\" (UniqueName: \"kubernetes.io/projected/516a4181-8e5b-4cd1-8a64-e51748a3560d-kube-api-access-bs6x6\") pod \"516a4181-8e5b-4cd1-8a64-e51748a3560d\" (UID: \"516a4181-8e5b-4cd1-8a64-e51748a3560d\") " Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.712417 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/516a4181-8e5b-4cd1-8a64-e51748a3560d-dns-svc\") pod \"516a4181-8e5b-4cd1-8a64-e51748a3560d\" (UID: \"516a4181-8e5b-4cd1-8a64-e51748a3560d\") " Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.713462 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/516a4181-8e5b-4cd1-8a64-e51748a3560d-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.715331 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/516a4181-8e5b-4cd1-8a64-e51748a3560d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "516a4181-8e5b-4cd1-8a64-e51748a3560d" (UID: "516a4181-8e5b-4cd1-8a64-e51748a3560d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.731739 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/516a4181-8e5b-4cd1-8a64-e51748a3560d-kube-api-access-bs6x6" (OuterVolumeSpecName: "kube-api-access-bs6x6") pod "516a4181-8e5b-4cd1-8a64-e51748a3560d" (UID: "516a4181-8e5b-4cd1-8a64-e51748a3560d"). InnerVolumeSpecName "kube-api-access-bs6x6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.814744 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bs6x6\" (UniqueName: \"kubernetes.io/projected/516a4181-8e5b-4cd1-8a64-e51748a3560d-kube-api-access-bs6x6\") on node \"crc\" DevicePath \"\"" Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.815171 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/516a4181-8e5b-4cd1-8a64-e51748a3560d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.869233 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6584b49599-56g66" event={"ID":"516a4181-8e5b-4cd1-8a64-e51748a3560d","Type":"ContainerDied","Data":"43d0982df41696ad76091af64635bd9aa148876dde5f3afaac25677dd5fb1e69"} Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.869299 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-56g66" Nov 26 14:38:45 crc kubenswrapper[5037]: E1126 14:38:45.872106 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached@sha256:36a0fb31978aee0ded2483de311631e64a644d0b0685b5b055f65ede7eb8e8a2\\\"\"" pod="openstack/memcached-0" podUID="bdd4849b-e92e-473d-88d0-74c060c04eb7" Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.939977 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a594ed83-eda8-4667-825e-ace74544882c" path="/var/lib/kubelet/pods/a594ed83-eda8-4667-825e-ace74544882c/volumes" Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.955968 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-56g66"] Nov 26 14:38:45 crc kubenswrapper[5037]: I1126 14:38:45.961841 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-56g66"] Nov 26 14:38:46 crc kubenswrapper[5037]: W1126 14:38:46.104135 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7d2fc57_9486_4084_aabe_96ed92c69f2c.slice/crio-09d7249a95dfdc6e5f7fda78fe3750c374fd28951ca8202d4326cb17513cf3e9 WatchSource:0}: Error finding container 09d7249a95dfdc6e5f7fda78fe3750c374fd28951ca8202d4326cb17513cf3e9: Status 404 returned error can't find the container with id 09d7249a95dfdc6e5f7fda78fe3750c374fd28951ca8202d4326cb17513cf3e9 Nov 26 14:38:46 crc kubenswrapper[5037]: I1126 14:38:46.105317 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 14:38:46 crc kubenswrapper[5037]: I1126 14:38:46.146630 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ptz2q"] Nov 26 14:38:46 crc kubenswrapper[5037]: W1126 14:38:46.148340 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c8fb953_de6b_48ee_afaf_c0a1a8b3acbf.slice/crio-18687e603da421cde9f2a22d55f4b6415ac7f69c74a6c269c2132a301d73bcab WatchSource:0}: Error finding container 18687e603da421cde9f2a22d55f4b6415ac7f69c74a6c269c2132a301d73bcab: Status 404 returned error can't find the container with id 18687e603da421cde9f2a22d55f4b6415ac7f69c74a6c269c2132a301d73bcab Nov 26 14:38:46 crc kubenswrapper[5037]: I1126 14:38:46.238111 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-264cs"] Nov 26 14:38:46 crc kubenswrapper[5037]: I1126 14:38:46.340399 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 14:38:46 crc kubenswrapper[5037]: I1126 14:38:46.876995 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 14:38:46 crc kubenswrapper[5037]: W1126 14:38:46.886100 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcdfb3a48_f040_40b1_a9ca_98d7b7f4fa89.slice/crio-7ea225b3f72f1c383a501114dc02fb4b2abc7c21b00bacf892761e49e8546da1 WatchSource:0}: Error finding container 7ea225b3f72f1c383a501114dc02fb4b2abc7c21b00bacf892761e49e8546da1: Status 404 returned error can't find the container with id 7ea225b3f72f1c383a501114dc02fb4b2abc7c21b00bacf892761e49e8546da1 Nov 26 14:38:46 crc kubenswrapper[5037]: I1126 14:38:46.886522 5037 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48","Type":"ContainerStarted","Data":"1ccbea2d11bd29a6293c0d48ba264de5bbf2e107c52b752d443ef961167fa4a8"} Nov 26 14:38:46 crc kubenswrapper[5037]: I1126 14:38:46.893197 5037 generic.go:334] "Generic (PLEG): container finished" podID="073bdcf3-16fa-4b27-8a82-709bf0e1bf1c" containerID="845a045204cfcb44bec0285e07c1da985f5f5b2cce872d3a31832ee3ec411ff4" exitCode=0 Nov 26 14:38:46 crc kubenswrapper[5037]: I1126 14:38:46.893304 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" event={"ID":"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c","Type":"ContainerDied","Data":"845a045204cfcb44bec0285e07c1da985f5f5b2cce872d3a31832ee3ec411ff4"} Nov 26 14:38:46 crc kubenswrapper[5037]: I1126 14:38:46.896999 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a7d2fc57-9486-4084-aabe-96ed92c69f2c","Type":"ContainerStarted","Data":"09d7249a95dfdc6e5f7fda78fe3750c374fd28951ca8202d4326cb17513cf3e9"} Nov 26 14:38:46 crc kubenswrapper[5037]: I1126 14:38:46.900026 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ptz2q" event={"ID":"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf","Type":"ContainerStarted","Data":"18687e603da421cde9f2a22d55f4b6415ac7f69c74a6c269c2132a301d73bcab"} Nov 26 14:38:46 crc kubenswrapper[5037]: I1126 14:38:46.901123 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-264cs" event={"ID":"80ce8a9a-aa28-40e4-ac35-c7d379224208","Type":"ContainerStarted","Data":"0bb14f6e64679cbe8f0af44beef4596b9d9476ba7856ce4fd8bdabd30a1a1179"} Nov 26 14:38:47 crc kubenswrapper[5037]: I1126 14:38:47.924667 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="516a4181-8e5b-4cd1-8a64-e51748a3560d" path="/var/lib/kubelet/pods/516a4181-8e5b-4cd1-8a64-e51748a3560d/volumes" Nov 26 14:38:47 crc kubenswrapper[5037]: I1126 14:38:47.926082 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" event={"ID":"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c","Type":"ContainerStarted","Data":"ea1009d7d3d459178be36a42c970086c0297f9bb2bdecc2bd997fc30c8d12a06"} Nov 26 14:38:47 crc kubenswrapper[5037]: I1126 14:38:47.926132 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89","Type":"ContainerStarted","Data":"7ea225b3f72f1c383a501114dc02fb4b2abc7c21b00bacf892761e49e8546da1"} Nov 26 14:38:47 crc kubenswrapper[5037]: I1126 14:38:47.931103 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:38:47 crc kubenswrapper[5037]: I1126 14:38:47.955647 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" podStartSLOduration=3.957076479 podStartE2EDuration="38.955616478s" podCreationTimestamp="2025-11-26 14:38:09 +0000 UTC" firstStartedPulling="2025-11-26 14:38:10.737907534 +0000 UTC m=+1357.534677718" lastFinishedPulling="2025-11-26 14:38:45.736447533 +0000 UTC m=+1392.533217717" observedRunningTime="2025-11-26 14:38:47.951749794 +0000 UTC m=+1394.748519988" watchObservedRunningTime="2025-11-26 14:38:47.955616478 +0000 UTC m=+1394.752386672" Nov 26 14:38:54 crc kubenswrapper[5037]: I1126 14:38:54.982027 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" 
event={"ID":"a7d2fc57-9486-4084-aabe-96ed92c69f2c","Type":"ContainerStarted","Data":"557776f46cf9b691730791d9711aeffac85522535f1f49783f904007f20687d6"} Nov 26 14:38:54 crc kubenswrapper[5037]: I1126 14:38:54.982729 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.002055 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ptz2q" event={"ID":"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf","Type":"ContainerStarted","Data":"4f3ea9d9853eb70966721e3e3e7a15223cf43f7532f45c09e3855990aac57118"} Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.002139 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ptz2q" Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.003908 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=32.054082206 podStartE2EDuration="40.003885387s" podCreationTimestamp="2025-11-26 14:38:15 +0000 UTC" firstStartedPulling="2025-11-26 14:38:46.107015263 +0000 UTC m=+1392.903785447" lastFinishedPulling="2025-11-26 14:38:54.056818444 +0000 UTC m=+1400.853588628" observedRunningTime="2025-11-26 14:38:54.99749115 +0000 UTC m=+1401.794261354" watchObservedRunningTime="2025-11-26 14:38:55.003885387 +0000 UTC m=+1401.800655561" Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.010496 5037 generic.go:334] "Generic (PLEG): container finished" podID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerID="a1372d4864850cb820cd97d1937d4be30fd34be7f7c558211291bd85334e4082" exitCode=0 Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.010878 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-264cs" event={"ID":"80ce8a9a-aa28-40e4-ac35-c7d379224208","Type":"ContainerDied","Data":"a1372d4864850cb820cd97d1937d4be30fd34be7f7c558211291bd85334e4082"} Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.016107 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48","Type":"ContainerStarted","Data":"7755a538fc4266a4b6a0966882c3dd065bcfa6c8249a1b1f4b57bd1f36608b7a"} Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.047746 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89","Type":"ContainerStarted","Data":"a2145e917b1ed177f4eb5739c42f821fe6ce560720e71b9834ad76a33c523409"} Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.057044 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ptz2q" podStartSLOduration=27.999668612 podStartE2EDuration="35.057022473s" podCreationTimestamp="2025-11-26 14:38:20 +0000 UTC" firstStartedPulling="2025-11-26 14:38:46.149820246 +0000 UTC m=+1392.946590430" lastFinishedPulling="2025-11-26 14:38:53.207174107 +0000 UTC m=+1400.003944291" observedRunningTime="2025-11-26 14:38:55.03640388 +0000 UTC m=+1401.833174074" watchObservedRunningTime="2025-11-26 14:38:55.057022473 +0000 UTC m=+1401.853792657" Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.325431 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.391553 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-d58hm"] Nov 
26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.742539 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.823011 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0346fe68-c180-481b-879e-59ee91287a7e-config\") pod \"0346fe68-c180-481b-879e-59ee91287a7e\" (UID: \"0346fe68-c180-481b-879e-59ee91287a7e\") " Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.823099 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zlmdc\" (UniqueName: \"kubernetes.io/projected/0346fe68-c180-481b-879e-59ee91287a7e-kube-api-access-zlmdc\") pod \"0346fe68-c180-481b-879e-59ee91287a7e\" (UID: \"0346fe68-c180-481b-879e-59ee91287a7e\") " Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.823143 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0346fe68-c180-481b-879e-59ee91287a7e-dns-svc\") pod \"0346fe68-c180-481b-879e-59ee91287a7e\" (UID: \"0346fe68-c180-481b-879e-59ee91287a7e\") " Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.823559 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0346fe68-c180-481b-879e-59ee91287a7e-config" (OuterVolumeSpecName: "config") pod "0346fe68-c180-481b-879e-59ee91287a7e" (UID: "0346fe68-c180-481b-879e-59ee91287a7e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.823760 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0346fe68-c180-481b-879e-59ee91287a7e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0346fe68-c180-481b-879e-59ee91287a7e" (UID: "0346fe68-c180-481b-879e-59ee91287a7e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.916346 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0346fe68-c180-481b-879e-59ee91287a7e-kube-api-access-zlmdc" (OuterVolumeSpecName: "kube-api-access-zlmdc") pod "0346fe68-c180-481b-879e-59ee91287a7e" (UID: "0346fe68-c180-481b-879e-59ee91287a7e"). InnerVolumeSpecName "kube-api-access-zlmdc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.925588 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zlmdc\" (UniqueName: \"kubernetes.io/projected/0346fe68-c180-481b-879e-59ee91287a7e-kube-api-access-zlmdc\") on node \"crc\" DevicePath \"\"" Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.925614 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0346fe68-c180-481b-879e-59ee91287a7e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:38:55 crc kubenswrapper[5037]: I1126 14:38:55.925649 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0346fe68-c180-481b-879e-59ee91287a7e-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:38:56 crc kubenswrapper[5037]: I1126 14:38:56.061831 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" event={"ID":"0346fe68-c180-481b-879e-59ee91287a7e","Type":"ContainerDied","Data":"62ef05b460955a36a8707c495ec79b4a4609871d7bffa021944164e9b30b8dda"} Nov 26 14:38:56 crc kubenswrapper[5037]: I1126 14:38:56.062418 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-d58hm" Nov 26 14:38:56 crc kubenswrapper[5037]: I1126 14:38:56.067851 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-264cs" event={"ID":"80ce8a9a-aa28-40e4-ac35-c7d379224208","Type":"ContainerStarted","Data":"dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486"} Nov 26 14:38:56 crc kubenswrapper[5037]: I1126 14:38:56.067891 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-264cs" event={"ID":"80ce8a9a-aa28-40e4-ac35-c7d379224208","Type":"ContainerStarted","Data":"1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff"} Nov 26 14:38:56 crc kubenswrapper[5037]: I1126 14:38:56.068187 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-264cs" Nov 26 14:38:56 crc kubenswrapper[5037]: I1126 14:38:56.068262 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-264cs" Nov 26 14:38:56 crc kubenswrapper[5037]: I1126 14:38:56.070489 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ba78b94a-32d0-4377-ac41-ffd036b241bf","Type":"ContainerStarted","Data":"773034796f31390fe28fdc58e1e871d5f541491426df6c51095f768444fbd35d"} Nov 26 14:38:56 crc kubenswrapper[5037]: I1126 14:38:56.092180 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-264cs" podStartSLOduration=29.23389558 podStartE2EDuration="36.092158924s" podCreationTimestamp="2025-11-26 14:38:20 +0000 UTC" firstStartedPulling="2025-11-26 14:38:46.245095191 +0000 UTC m=+1393.041865365" lastFinishedPulling="2025-11-26 14:38:53.103358525 +0000 UTC m=+1399.900128709" observedRunningTime="2025-11-26 14:38:56.090360071 +0000 UTC m=+1402.887130265" watchObservedRunningTime="2025-11-26 14:38:56.092158924 +0000 UTC m=+1402.888929108" Nov 26 14:38:56 crc kubenswrapper[5037]: I1126 14:38:56.152785 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-d58hm"] Nov 26 14:38:56 crc kubenswrapper[5037]: I1126 14:38:56.160413 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/dnsmasq-dns-7c6d9948dc-d58hm"] Nov 26 14:38:57 crc kubenswrapper[5037]: I1126 14:38:57.929249 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0346fe68-c180-481b-879e-59ee91287a7e" path="/var/lib/kubelet/pods/0346fe68-c180-481b-879e-59ee91287a7e/volumes" Nov 26 14:38:59 crc kubenswrapper[5037]: I1126 14:38:59.114024 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48","Type":"ContainerStarted","Data":"88d82a9101d1e849e1da1553fa5b16c81210a121618ecb5e38def19cad7dc725"} Nov 26 14:38:59 crc kubenswrapper[5037]: I1126 14:38:59.116410 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"bf45bdb2-c880-43f7-b30a-4d1b36363f7d","Type":"ContainerStarted","Data":"50c1e61dc6403c1bf62fd3bbd57c7e084ff6e25c816cb5cc6049442c76ec2eba"} Nov 26 14:38:59 crc kubenswrapper[5037]: I1126 14:38:59.118757 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"bdd4849b-e92e-473d-88d0-74c060c04eb7","Type":"ContainerStarted","Data":"ac800c71f24567e467410fb7c333d7691707d518b6f0f84492f89244a18f9205"} Nov 26 14:38:59 crc kubenswrapper[5037]: I1126 14:38:59.119039 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 26 14:38:59 crc kubenswrapper[5037]: I1126 14:38:59.122497 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89","Type":"ContainerStarted","Data":"15306a8687a9663850db67213877c281cc0db7db6eb704f63cd32810d22a787d"} Nov 26 14:38:59 crc kubenswrapper[5037]: I1126 14:38:59.147819 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=25.618726621 podStartE2EDuration="37.147796496s" podCreationTimestamp="2025-11-26 14:38:22 +0000 UTC" firstStartedPulling="2025-11-26 14:38:46.360555098 +0000 UTC m=+1393.157325282" lastFinishedPulling="2025-11-26 14:38:57.889624953 +0000 UTC m=+1404.686395157" observedRunningTime="2025-11-26 14:38:59.139630947 +0000 UTC m=+1405.936401171" watchObservedRunningTime="2025-11-26 14:38:59.147796496 +0000 UTC m=+1405.944566700" Nov 26 14:38:59 crc kubenswrapper[5037]: I1126 14:38:59.191678 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=29.193095401 podStartE2EDuration="40.191648655s" podCreationTimestamp="2025-11-26 14:38:19 +0000 UTC" firstStartedPulling="2025-11-26 14:38:46.891839858 +0000 UTC m=+1393.688610082" lastFinishedPulling="2025-11-26 14:38:57.890393112 +0000 UTC m=+1404.687163336" observedRunningTime="2025-11-26 14:38:59.187781601 +0000 UTC m=+1405.984551785" watchObservedRunningTime="2025-11-26 14:38:59.191648655 +0000 UTC m=+1405.988418849" Nov 26 14:38:59 crc kubenswrapper[5037]: I1126 14:38:59.206620 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=8.727345285 podStartE2EDuration="46.20659727s" podCreationTimestamp="2025-11-26 14:38:13 +0000 UTC" firstStartedPulling="2025-11-26 14:38:20.41247794 +0000 UTC m=+1367.209248124" lastFinishedPulling="2025-11-26 14:38:57.891729885 +0000 UTC m=+1404.688500109" observedRunningTime="2025-11-26 14:38:59.203242228 +0000 UTC m=+1406.000012402" watchObservedRunningTime="2025-11-26 14:38:59.20659727 +0000 UTC m=+1406.003367474" Nov 26 14:38:59 crc kubenswrapper[5037]: I1126 
14:38:59.410228 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 26 14:38:59 crc kubenswrapper[5037]: I1126 14:38:59.476371 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 26 14:38:59 crc kubenswrapper[5037]: I1126 14:38:59.670137 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 26 14:38:59 crc kubenswrapper[5037]: I1126 14:38:59.714694 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.130427 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.130475 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.174562 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.177223 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.441454 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-kjrqh"] Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.443154 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.446049 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.477489 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-kjrqh"] Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.501240 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-j7ksk"] Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.502625 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.503835 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-j7ksk"] Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.506194 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.583516 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-kjrqh"] Nov 26 14:39:00 crc kubenswrapper[5037]: E1126 14:39:00.584423 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc kube-api-access-kxcbk ovsdbserver-nb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" podUID="6eae495a-219f-43d5-bc50-10cba06d2450" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.614458 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/20b489db-2066-4222-9131-99da1bd054e3-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.614566 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxcbk\" (UniqueName: \"kubernetes.io/projected/6eae495a-219f-43d5-bc50-10cba06d2450-kube-api-access-kxcbk\") pod \"dnsmasq-dns-65c78595c5-kjrqh\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.614596 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b489db-2066-4222-9131-99da1bd054e3-config\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.614626 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-config\") pod \"dnsmasq-dns-65c78595c5-kjrqh\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.614647 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20b489db-2066-4222-9131-99da1bd054e3-combined-ca-bundle\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.614667 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/20b489db-2066-4222-9131-99da1bd054e3-ovn-rundir\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.614699 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-bg26k\" (UniqueName: \"kubernetes.io/projected/20b489db-2066-4222-9131-99da1bd054e3-kube-api-access-bg26k\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.614723 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-ovsdbserver-nb\") pod \"dnsmasq-dns-65c78595c5-kjrqh\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.614751 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-dns-svc\") pod \"dnsmasq-dns-65c78595c5-kjrqh\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.614784 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/20b489db-2066-4222-9131-99da1bd054e3-ovs-rundir\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.618103 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-dsswh"] Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.620960 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.624628 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.636916 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-dsswh"] Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.679128 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.684590 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.688049 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.688882 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.688959 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.688961 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-c5w47" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.697254 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.715905 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-config\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.715953 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-config\") pod \"dnsmasq-dns-65c78595c5-kjrqh\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.715976 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzt8w\" (UniqueName: \"kubernetes.io/projected/8d63e10d-a167-4255-bb40-cdcf22f68680-kube-api-access-mzt8w\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.716005 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20b489db-2066-4222-9131-99da1bd054e3-combined-ca-bundle\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.716037 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/20b489db-2066-4222-9131-99da1bd054e3-ovn-rundir\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.716077 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bg26k\" (UniqueName: \"kubernetes.io/projected/20b489db-2066-4222-9131-99da1bd054e3-kube-api-access-bg26k\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.716098 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" 
(UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.716112 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-ovsdbserver-nb\") pod \"dnsmasq-dns-65c78595c5-kjrqh\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.716140 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-dns-svc\") pod \"dnsmasq-dns-65c78595c5-kjrqh\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.716173 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/20b489db-2066-4222-9131-99da1bd054e3-ovs-rundir\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.716193 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/20b489db-2066-4222-9131-99da1bd054e3-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.716235 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.716263 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxcbk\" (UniqueName: \"kubernetes.io/projected/6eae495a-219f-43d5-bc50-10cba06d2450-kube-api-access-kxcbk\") pod \"dnsmasq-dns-65c78595c5-kjrqh\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.716304 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.716333 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b489db-2066-4222-9131-99da1bd054e3-config\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.716974 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b489db-2066-4222-9131-99da1bd054e3-config\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " 
pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.717157 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-config\") pod \"dnsmasq-dns-65c78595c5-kjrqh\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.717680 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-ovsdbserver-nb\") pod \"dnsmasq-dns-65c78595c5-kjrqh\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.718218 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-dns-svc\") pod \"dnsmasq-dns-65c78595c5-kjrqh\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.718417 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/20b489db-2066-4222-9131-99da1bd054e3-ovs-rundir\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.718458 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/20b489db-2066-4222-9131-99da1bd054e3-ovn-rundir\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.743386 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20b489db-2066-4222-9131-99da1bd054e3-combined-ca-bundle\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.744091 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/20b489db-2066-4222-9131-99da1bd054e3-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.748047 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxcbk\" (UniqueName: \"kubernetes.io/projected/6eae495a-219f-43d5-bc50-10cba06d2450-kube-api-access-kxcbk\") pod \"dnsmasq-dns-65c78595c5-kjrqh\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.752866 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bg26k\" (UniqueName: \"kubernetes.io/projected/20b489db-2066-4222-9131-99da1bd054e3-kube-api-access-bg26k\") pod \"ovn-controller-metrics-j7ksk\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.818192 5037 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec26620a-6ad8-4792-bb25-543dc31d3be5-config\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.818480 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec26620a-6ad8-4792-bb25-543dc31d3be5-scripts\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.818634 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.818754 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.818864 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.818970 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbmnh\" (UniqueName: \"kubernetes.io/projected/ec26620a-6ad8-4792-bb25-543dc31d3be5-kube-api-access-dbmnh\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.819067 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.819171 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.819310 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-config\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.819409 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzt8w\" (UniqueName: 
\"kubernetes.io/projected/8d63e10d-a167-4255-bb40-cdcf22f68680-kube-api-access-mzt8w\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.819594 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.819656 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ec26620a-6ad8-4792-bb25-543dc31d3be5-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.819877 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.820202 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.820337 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-config\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.820722 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.823872 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.842114 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzt8w\" (UniqueName: \"kubernetes.io/projected/8d63e10d-a167-4255-bb40-cdcf22f68680-kube-api-access-mzt8w\") pod \"dnsmasq-dns-5c7b6b5695-dsswh\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.921554 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.921731 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.921759 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbmnh\" (UniqueName: \"kubernetes.io/projected/ec26620a-6ad8-4792-bb25-543dc31d3be5-kube-api-access-dbmnh\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.921783 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.921855 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ec26620a-6ad8-4792-bb25-543dc31d3be5-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.921900 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec26620a-6ad8-4792-bb25-543dc31d3be5-config\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.921929 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec26620a-6ad8-4792-bb25-543dc31d3be5-scripts\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.923051 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec26620a-6ad8-4792-bb25-543dc31d3be5-scripts\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.924227 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ec26620a-6ad8-4792-bb25-543dc31d3be5-ovn-rundir\") pod 
\"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.924766 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec26620a-6ad8-4792-bb25-543dc31d3be5-config\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:00 crc kubenswrapper[5037]: I1126 14:39:00.956646 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.015936 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.015945 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.016345 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.020096 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbmnh\" (UniqueName: \"kubernetes.io/projected/ec26620a-6ad8-4792-bb25-543dc31d3be5-kube-api-access-dbmnh\") pod \"ovn-northd-0\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") " pod="openstack/ovn-northd-0" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.066846 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-j7ksk"] Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.159660 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"300dce8f-4337-4707-8075-f32b93f03e4f","Type":"ContainerStarted","Data":"b5c06e36fe4f8a14ce6fd85185880cc6b1029af8d4de11a5e266f008fdc3b833"} Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.161631 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-j7ksk" event={"ID":"20b489db-2066-4222-9131-99da1bd054e3","Type":"ContainerStarted","Data":"74e3e529f3af67fb4aaec99f6c27730587b3ddc393d2f7e77e75f62a7c4ebfa6"} Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.162074 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.189050 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.307604 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.333410 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-dns-svc\") pod \"6eae495a-219f-43d5-bc50-10cba06d2450\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.333732 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-config\") pod \"6eae495a-219f-43d5-bc50-10cba06d2450\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.333805 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxcbk\" (UniqueName: \"kubernetes.io/projected/6eae495a-219f-43d5-bc50-10cba06d2450-kube-api-access-kxcbk\") pod \"6eae495a-219f-43d5-bc50-10cba06d2450\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.333859 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6eae495a-219f-43d5-bc50-10cba06d2450" (UID: "6eae495a-219f-43d5-bc50-10cba06d2450"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.333870 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-ovsdbserver-nb\") pod \"6eae495a-219f-43d5-bc50-10cba06d2450\" (UID: \"6eae495a-219f-43d5-bc50-10cba06d2450\") " Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.334394 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.334403 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-config" (OuterVolumeSpecName: "config") pod "6eae495a-219f-43d5-bc50-10cba06d2450" (UID: "6eae495a-219f-43d5-bc50-10cba06d2450"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.335634 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6eae495a-219f-43d5-bc50-10cba06d2450" (UID: "6eae495a-219f-43d5-bc50-10cba06d2450"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.339009 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6eae495a-219f-43d5-bc50-10cba06d2450-kube-api-access-kxcbk" (OuterVolumeSpecName: "kube-api-access-kxcbk") pod "6eae495a-219f-43d5-bc50-10cba06d2450" (UID: "6eae495a-219f-43d5-bc50-10cba06d2450"). InnerVolumeSpecName "kube-api-access-kxcbk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.435749 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.435783 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxcbk\" (UniqueName: \"kubernetes.io/projected/6eae495a-219f-43d5-bc50-10cba06d2450-kube-api-access-kxcbk\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.435795 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6eae495a-219f-43d5-bc50-10cba06d2450-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.438083 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-dsswh"] Nov 26 14:39:01 crc kubenswrapper[5037]: I1126 14:39:01.784074 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 14:39:01 crc kubenswrapper[5037]: W1126 14:39:01.790480 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec26620a_6ad8_4792_bb25_543dc31d3be5.slice/crio-42dcb3ac7ab34f187ccdcfaf4bb9e02e3e8ae4d1454ba4529ee3898b2b5d4db3 WatchSource:0}: Error finding container 42dcb3ac7ab34f187ccdcfaf4bb9e02e3e8ae4d1454ba4529ee3898b2b5d4db3: Status 404 returned error can't find the container with id 42dcb3ac7ab34f187ccdcfaf4bb9e02e3e8ae4d1454ba4529ee3898b2b5d4db3 Nov 26 14:39:02 crc kubenswrapper[5037]: I1126 14:39:02.170016 5037 generic.go:334] "Generic (PLEG): container finished" podID="8d63e10d-a167-4255-bb40-cdcf22f68680" containerID="86b088378442a6bc7aaa68a43a1c6be73e43cab073b7fe25ffe042a8a2402305" exitCode=0 Nov 26 14:39:02 crc kubenswrapper[5037]: I1126 14:39:02.170061 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" event={"ID":"8d63e10d-a167-4255-bb40-cdcf22f68680","Type":"ContainerDied","Data":"86b088378442a6bc7aaa68a43a1c6be73e43cab073b7fe25ffe042a8a2402305"} Nov 26 14:39:02 crc kubenswrapper[5037]: I1126 14:39:02.170095 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" event={"ID":"8d63e10d-a167-4255-bb40-cdcf22f68680","Type":"ContainerStarted","Data":"53162c874e80612b32d9e9a74604af34f1c305353102372e7fcd2e3c2efb9c00"} Nov 26 14:39:02 crc kubenswrapper[5037]: I1126 14:39:02.171679 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ec26620a-6ad8-4792-bb25-543dc31d3be5","Type":"ContainerStarted","Data":"42dcb3ac7ab34f187ccdcfaf4bb9e02e3e8ae4d1454ba4529ee3898b2b5d4db3"} Nov 26 14:39:02 crc kubenswrapper[5037]: I1126 14:39:02.173340 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"7f05291f-1331-411b-9971-c71218d11a35","Type":"ContainerStarted","Data":"1349d55b286187786ca0c0752c21570fa4516f326b8465f8b5bb44574d1252f9"} Nov 26 14:39:02 crc kubenswrapper[5037]: I1126 14:39:02.175674 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-j7ksk" event={"ID":"20b489db-2066-4222-9131-99da1bd054e3","Type":"ContainerStarted","Data":"40c2342f25d25bc9e86e10174589a9485aa62cf36b1645c8581d0f0406ccb2fc"} Nov 26 14:39:02 crc 
kubenswrapper[5037]: I1126 14:39:02.175772 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65c78595c5-kjrqh" Nov 26 14:39:02 crc kubenswrapper[5037]: I1126 14:39:02.208021 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-j7ksk" podStartSLOduration=2.208005968 podStartE2EDuration="2.208005968s" podCreationTimestamp="2025-11-26 14:39:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:39:02.207634409 +0000 UTC m=+1409.004404603" watchObservedRunningTime="2025-11-26 14:39:02.208005968 +0000 UTC m=+1409.004776152" Nov 26 14:39:02 crc kubenswrapper[5037]: I1126 14:39:02.265530 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-kjrqh"] Nov 26 14:39:02 crc kubenswrapper[5037]: I1126 14:39:02.274996 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-kjrqh"] Nov 26 14:39:03 crc kubenswrapper[5037]: I1126 14:39:03.187165 5037 generic.go:334] "Generic (PLEG): container finished" podID="bf45bdb2-c880-43f7-b30a-4d1b36363f7d" containerID="50c1e61dc6403c1bf62fd3bbd57c7e084ff6e25c816cb5cc6049442c76ec2eba" exitCode=0 Nov 26 14:39:03 crc kubenswrapper[5037]: I1126 14:39:03.187198 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"bf45bdb2-c880-43f7-b30a-4d1b36363f7d","Type":"ContainerDied","Data":"50c1e61dc6403c1bf62fd3bbd57c7e084ff6e25c816cb5cc6049442c76ec2eba"} Nov 26 14:39:03 crc kubenswrapper[5037]: I1126 14:39:03.191830 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" event={"ID":"8d63e10d-a167-4255-bb40-cdcf22f68680","Type":"ContainerStarted","Data":"0dd95fb86f5c9c7f2c0e3579b779a59b83c93b72c6f6bf8325a47fc2e45d68b7"} Nov 26 14:39:03 crc kubenswrapper[5037]: I1126 14:39:03.192069 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:03 crc kubenswrapper[5037]: I1126 14:39:03.230488 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" podStartSLOduration=3.230443401 podStartE2EDuration="3.230443401s" podCreationTimestamp="2025-11-26 14:39:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:39:03.228141284 +0000 UTC m=+1410.024911468" watchObservedRunningTime="2025-11-26 14:39:03.230443401 +0000 UTC m=+1410.027213595" Nov 26 14:39:03 crc kubenswrapper[5037]: I1126 14:39:03.917694 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6eae495a-219f-43d5-bc50-10cba06d2450" path="/var/lib/kubelet/pods/6eae495a-219f-43d5-bc50-10cba06d2450/volumes" Nov 26 14:39:04 crc kubenswrapper[5037]: I1126 14:39:04.097489 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 26 14:39:04 crc kubenswrapper[5037]: I1126 14:39:04.199253 5037 generic.go:334] "Generic (PLEG): container finished" podID="300dce8f-4337-4707-8075-f32b93f03e4f" containerID="b5c06e36fe4f8a14ce6fd85185880cc6b1029af8d4de11a5e266f008fdc3b833" exitCode=0 Nov 26 14:39:04 crc kubenswrapper[5037]: I1126 14:39:04.199339 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"300dce8f-4337-4707-8075-f32b93f03e4f","Type":"ContainerDied","Data":"b5c06e36fe4f8a14ce6fd85185880cc6b1029af8d4de11a5e266f008fdc3b833"} Nov 26 14:39:04 crc kubenswrapper[5037]: I1126 14:39:04.204619 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ec26620a-6ad8-4792-bb25-543dc31d3be5","Type":"ContainerStarted","Data":"ea28ba554ccf3be563e142ef9810c318f1a7398137617c44deec729fa9ddf87d"} Nov 26 14:39:04 crc kubenswrapper[5037]: I1126 14:39:04.204674 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ec26620a-6ad8-4792-bb25-543dc31d3be5","Type":"ContainerStarted","Data":"d3158b8703e1c139eecff816090fc54bf7b1598ce59a6a91d56a6bde613e9529"} Nov 26 14:39:04 crc kubenswrapper[5037]: I1126 14:39:04.205597 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 26 14:39:04 crc kubenswrapper[5037]: I1126 14:39:04.214916 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"bf45bdb2-c880-43f7-b30a-4d1b36363f7d","Type":"ContainerStarted","Data":"df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de"} Nov 26 14:39:04 crc kubenswrapper[5037]: I1126 14:39:04.248885 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.864690108 podStartE2EDuration="4.248861644s" podCreationTimestamp="2025-11-26 14:39:00 +0000 UTC" firstStartedPulling="2025-11-26 14:39:01.79322147 +0000 UTC m=+1408.589991684" lastFinishedPulling="2025-11-26 14:39:03.177393026 +0000 UTC m=+1409.974163220" observedRunningTime="2025-11-26 14:39:04.243873482 +0000 UTC m=+1411.040643666" watchObservedRunningTime="2025-11-26 14:39:04.248861644 +0000 UTC m=+1411.045631818" Nov 26 14:39:04 crc kubenswrapper[5037]: I1126 14:39:04.284250 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=7.801277097 podStartE2EDuration="53.284230077s" podCreationTimestamp="2025-11-26 14:38:11 +0000 UTC" firstStartedPulling="2025-11-26 14:38:13.226169534 +0000 UTC m=+1360.022939718" lastFinishedPulling="2025-11-26 14:38:58.709122514 +0000 UTC m=+1405.505892698" observedRunningTime="2025-11-26 14:39:04.273436734 +0000 UTC m=+1411.070206918" watchObservedRunningTime="2025-11-26 14:39:04.284230077 +0000 UTC m=+1411.081000261" Nov 26 14:39:05 crc kubenswrapper[5037]: I1126 14:39:05.227431 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"300dce8f-4337-4707-8075-f32b93f03e4f","Type":"ContainerStarted","Data":"b153954d737c10034799ce4a540d151fbb5420d7fc983a9615870845e76fb0be"} Nov 26 14:39:05 crc kubenswrapper[5037]: I1126 14:39:05.252910 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371983.60189 podStartE2EDuration="53.252885457s" podCreationTimestamp="2025-11-26 14:38:12 +0000 UTC" firstStartedPulling="2025-11-26 14:38:20.412587912 +0000 UTC m=+1367.209358096" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:39:05.248679264 +0000 UTC m=+1412.045449458" watchObservedRunningTime="2025-11-26 14:39:05.252885457 +0000 UTC m=+1412.049655641" Nov 26 14:39:05 crc kubenswrapper[5037]: I1126 14:39:05.819584 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-dsswh"] Nov 26 14:39:05 crc 
kubenswrapper[5037]: I1126 14:39:05.820717 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" podUID="8d63e10d-a167-4255-bb40-cdcf22f68680" containerName="dnsmasq-dns" containerID="cri-o://0dd95fb86f5c9c7f2c0e3579b779a59b83c93b72c6f6bf8325a47fc2e45d68b7" gracePeriod=10 Nov 26 14:39:05 crc kubenswrapper[5037]: I1126 14:39:05.825521 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 14:39:05 crc kubenswrapper[5037]: I1126 14:39:05.870062 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-5cc8r"] Nov 26 14:39:05 crc kubenswrapper[5037]: I1126 14:39:05.872844 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:05 crc kubenswrapper[5037]: I1126 14:39:05.900198 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-5cc8r"] Nov 26 14:39:05 crc kubenswrapper[5037]: I1126 14:39:05.914368 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4k6ml\" (UniqueName: \"kubernetes.io/projected/8e71c09e-69be-4196-beaf-e34c6b9880bb-kube-api-access-4k6ml\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:05 crc kubenswrapper[5037]: I1126 14:39:05.914427 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-ovsdbserver-sb\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:05 crc kubenswrapper[5037]: I1126 14:39:05.914464 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-dns-svc\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:05 crc kubenswrapper[5037]: I1126 14:39:05.914550 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-config\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:05 crc kubenswrapper[5037]: I1126 14:39:05.914579 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-ovsdbserver-nb\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.015985 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-ovsdbserver-nb\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.016104 5037 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-4k6ml\" (UniqueName: \"kubernetes.io/projected/8e71c09e-69be-4196-beaf-e34c6b9880bb-kube-api-access-4k6ml\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.016139 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-ovsdbserver-sb\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.016184 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-dns-svc\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.016269 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-config\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.017382 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-ovsdbserver-nb\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.017431 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-config\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.017877 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-dns-svc\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.018582 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-ovsdbserver-sb\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.039390 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4k6ml\" (UniqueName: \"kubernetes.io/projected/8e71c09e-69be-4196-beaf-e34c6b9880bb-kube-api-access-4k6ml\") pod \"dnsmasq-dns-cf8bcbfcf-5cc8r\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.194262 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.241140 5037 generic.go:334] "Generic (PLEG): container finished" podID="8d63e10d-a167-4255-bb40-cdcf22f68680" containerID="0dd95fb86f5c9c7f2c0e3579b779a59b83c93b72c6f6bf8325a47fc2e45d68b7" exitCode=0 Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.241979 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" event={"ID":"8d63e10d-a167-4255-bb40-cdcf22f68680","Type":"ContainerDied","Data":"0dd95fb86f5c9c7f2c0e3579b779a59b83c93b72c6f6bf8325a47fc2e45d68b7"} Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.242030 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" event={"ID":"8d63e10d-a167-4255-bb40-cdcf22f68680","Type":"ContainerDied","Data":"53162c874e80612b32d9e9a74604af34f1c305353102372e7fcd2e3c2efb9c00"} Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.242042 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53162c874e80612b32d9e9a74604af34f1c305353102372e7fcd2e3c2efb9c00" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.327935 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.423987 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzt8w\" (UniqueName: \"kubernetes.io/projected/8d63e10d-a167-4255-bb40-cdcf22f68680-kube-api-access-mzt8w\") pod \"8d63e10d-a167-4255-bb40-cdcf22f68680\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.424115 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-dns-svc\") pod \"8d63e10d-a167-4255-bb40-cdcf22f68680\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.424144 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-ovsdbserver-sb\") pod \"8d63e10d-a167-4255-bb40-cdcf22f68680\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.424217 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-config\") pod \"8d63e10d-a167-4255-bb40-cdcf22f68680\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.424268 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-ovsdbserver-nb\") pod \"8d63e10d-a167-4255-bb40-cdcf22f68680\" (UID: \"8d63e10d-a167-4255-bb40-cdcf22f68680\") " Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.432097 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d63e10d-a167-4255-bb40-cdcf22f68680-kube-api-access-mzt8w" (OuterVolumeSpecName: "kube-api-access-mzt8w") pod "8d63e10d-a167-4255-bb40-cdcf22f68680" (UID: "8d63e10d-a167-4255-bb40-cdcf22f68680"). InnerVolumeSpecName "kube-api-access-mzt8w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.459565 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8d63e10d-a167-4255-bb40-cdcf22f68680" (UID: "8d63e10d-a167-4255-bb40-cdcf22f68680"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.468992 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8d63e10d-a167-4255-bb40-cdcf22f68680" (UID: "8d63e10d-a167-4255-bb40-cdcf22f68680"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.480830 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8d63e10d-a167-4255-bb40-cdcf22f68680" (UID: "8d63e10d-a167-4255-bb40-cdcf22f68680"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.484768 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-config" (OuterVolumeSpecName: "config") pod "8d63e10d-a167-4255-bb40-cdcf22f68680" (UID: "8d63e10d-a167-4255-bb40-cdcf22f68680"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.526517 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzt8w\" (UniqueName: \"kubernetes.io/projected/8d63e10d-a167-4255-bb40-cdcf22f68680-kube-api-access-mzt8w\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.526572 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.526589 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.526601 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.526612 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d63e10d-a167-4255-bb40-cdcf22f68680-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.670239 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-5cc8r"] Nov 26 14:39:06 crc kubenswrapper[5037]: W1126 14:39:06.670771 5037 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e71c09e_69be_4196_beaf_e34c6b9880bb.slice/crio-1a564c4f5c87a1dcee66a028b0c284d71116e15e700b1fbb2544140640352a8b WatchSource:0}: Error finding container 1a564c4f5c87a1dcee66a028b0c284d71116e15e700b1fbb2544140640352a8b: Status 404 returned error can't find the container with id 1a564c4f5c87a1dcee66a028b0c284d71116e15e700b1fbb2544140640352a8b Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.937601 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 26 14:39:06 crc kubenswrapper[5037]: E1126 14:39:06.938174 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d63e10d-a167-4255-bb40-cdcf22f68680" containerName="init" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.938204 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d63e10d-a167-4255-bb40-cdcf22f68680" containerName="init" Nov 26 14:39:06 crc kubenswrapper[5037]: E1126 14:39:06.938226 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d63e10d-a167-4255-bb40-cdcf22f68680" containerName="dnsmasq-dns" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.938239 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d63e10d-a167-4255-bb40-cdcf22f68680" containerName="dnsmasq-dns" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.938589 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d63e10d-a167-4255-bb40-cdcf22f68680" containerName="dnsmasq-dns" Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.991519 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 26 14:39:06 crc kubenswrapper[5037]: I1126 14:39:06.991674 5037 util.go:30] "No sandbox for pod can be found. 
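
The two "Failed to process watch event ... Status 404" warnings (for pod ec26620a-... earlier and pod 8e71c09e-... here) are transient races at container start, and both name the same cgroup layout: a best-effort pod slice whose name is the pod UID with dashes mapped to underscores, plus a crio-&lt;container-id&gt; child. The hypothetical helper below, besteffortCgroupPath, rebuilds the exact Name: field from this entry; the naming convention is read off the log, not taken from kubelet code.

```go
package main

import (
	"fmt"
	"strings"
)

// besteffortCgroupPath is a hypothetical helper that rebuilds the Name:
// field of the watch-event warning above: a best-effort pod slice named
// after the pod UID (dashes become underscores) with a crio-<id> child.
func besteffortCgroupPath(podUID, containerID string) string {
	slice := "kubepods-besteffort-pod" + strings.ReplaceAll(podUID, "-", "_") + ".slice"
	return "/kubepods.slice/kubepods-besteffort.slice/" + slice + "/crio-" + containerID
}

func main() {
	fmt.Println(besteffortCgroupPath(
		"8e71c09e-69be-4196-beaf-e34c6b9880bb", // dnsmasq-dns-cf8bcbfcf-5cc8r pod UID
		"1a564c4f5c87a1dcee66a028b0c284d71116e15e700b1fbb2544140640352a8b",
	))
}
```
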
Need to start a new one" pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:06.999691 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-9vqq2" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:06.999723 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:06.999910 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.000022 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.041323 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ltfz\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-kube-api-access-7ltfz\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.041580 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.041870 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/10886f85-c800-4999-8c79-c490c60696cc-cache\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.042019 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/10886f85-c800-4999-8c79-c490c60696cc-lock\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.042215 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.144048 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/10886f85-c800-4999-8c79-c490c60696cc-cache\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.144107 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/10886f85-c800-4999-8c79-c490c60696cc-lock\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.144170 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift\") pod \"swift-storage-0\" (UID: 
\"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.144210 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ltfz\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-kube-api-access-7ltfz\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.144251 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.144485 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/10886f85-c800-4999-8c79-c490c60696cc-cache\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: E1126 14:39:07.144489 5037 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 14:39:07 crc kubenswrapper[5037]: E1126 14:39:07.144536 5037 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 14:39:07 crc kubenswrapper[5037]: E1126 14:39:07.144593 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift podName:10886f85-c800-4999-8c79-c490c60696cc nodeName:}" failed. No retries permitted until 2025-11-26 14:39:07.644573203 +0000 UTC m=+1414.441343387 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift") pod "swift-storage-0" (UID: "10886f85-c800-4999-8c79-c490c60696cc") : configmap "swift-ring-files" not found Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.144598 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/10886f85-c800-4999-8c79-c490c60696cc-lock\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.144670 5037 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.168615 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.171692 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ltfz\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-kube-api-access-7ltfz\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.250576 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" event={"ID":"8e71c09e-69be-4196-beaf-e34c6b9880bb","Type":"ContainerStarted","Data":"cce77c2d47eb6c8a42167b67fa100f6a46dae5339817afbff1660e9604e478b8"} Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.250654 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" event={"ID":"8e71c09e-69be-4196-beaf-e34c6b9880bb","Type":"ContainerStarted","Data":"1a564c4f5c87a1dcee66a028b0c284d71116e15e700b1fbb2544140640352a8b"} Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.250703 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-dsswh" Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.288083 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-dsswh"] Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.296372 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-dsswh"] Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.653578 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:07 crc kubenswrapper[5037]: E1126 14:39:07.654067 5037 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 14:39:07 crc kubenswrapper[5037]: E1126 14:39:07.654102 5037 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 14:39:07 crc kubenswrapper[5037]: E1126 14:39:07.654204 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift podName:10886f85-c800-4999-8c79-c490c60696cc nodeName:}" failed. No retries permitted until 2025-11-26 14:39:08.654167594 +0000 UTC m=+1415.450937818 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift") pod "swift-storage-0" (UID: "10886f85-c800-4999-8c79-c490c60696cc") : configmap "swift-ring-files" not found Nov 26 14:39:07 crc kubenswrapper[5037]: I1126 14:39:07.919969 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d63e10d-a167-4255-bb40-cdcf22f68680" path="/var/lib/kubelet/pods/8d63e10d-a167-4255-bb40-cdcf22f68680/volumes" Nov 26 14:39:08 crc kubenswrapper[5037]: I1126 14:39:08.261999 5037 generic.go:334] "Generic (PLEG): container finished" podID="8e71c09e-69be-4196-beaf-e34c6b9880bb" containerID="cce77c2d47eb6c8a42167b67fa100f6a46dae5339817afbff1660e9604e478b8" exitCode=0 Nov 26 14:39:08 crc kubenswrapper[5037]: I1126 14:39:08.262071 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" event={"ID":"8e71c09e-69be-4196-beaf-e34c6b9880bb","Type":"ContainerDied","Data":"cce77c2d47eb6c8a42167b67fa100f6a46dae5339817afbff1660e9604e478b8"} Nov 26 14:39:08 crc kubenswrapper[5037]: I1126 14:39:08.669341 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:08 crc kubenswrapper[5037]: E1126 14:39:08.669708 5037 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 14:39:08 crc kubenswrapper[5037]: E1126 14:39:08.669781 5037 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 14:39:08 crc kubenswrapper[5037]: E1126 14:39:08.669903 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift 
podName:10886f85-c800-4999-8c79-c490c60696cc nodeName:}" failed. No retries permitted until 2025-11-26 14:39:10.669874893 +0000 UTC m=+1417.466645107 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift") pod "swift-storage-0" (UID: "10886f85-c800-4999-8c79-c490c60696cc") : configmap "swift-ring-files" not found Nov 26 14:39:09 crc kubenswrapper[5037]: I1126 14:39:09.273370 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" event={"ID":"8e71c09e-69be-4196-beaf-e34c6b9880bb","Type":"ContainerStarted","Data":"4b1a296a4b64571a6a460f1df382d22957f75c8bfc4cd8ab7cdb5659c53a0d9e"} Nov 26 14:39:09 crc kubenswrapper[5037]: I1126 14:39:09.273540 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:09 crc kubenswrapper[5037]: I1126 14:39:09.293992 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" podStartSLOduration=4.293973007 podStartE2EDuration="4.293973007s" podCreationTimestamp="2025-11-26 14:39:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:39:09.290160954 +0000 UTC m=+1416.086931148" watchObservedRunningTime="2025-11-26 14:39:09.293973007 +0000 UTC m=+1416.090743191" Nov 26 14:39:10 crc kubenswrapper[5037]: I1126 14:39:10.707690 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:10 crc kubenswrapper[5037]: E1126 14:39:10.707922 5037 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 14:39:10 crc kubenswrapper[5037]: E1126 14:39:10.707954 5037 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 14:39:10 crc kubenswrapper[5037]: E1126 14:39:10.708022 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift podName:10886f85-c800-4999-8c79-c490c60696cc nodeName:}" failed. No retries permitted until 2025-11-26 14:39:14.708000481 +0000 UTC m=+1421.504770665 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift") pod "swift-storage-0" (UID: "10886f85-c800-4999-8c79-c490c60696cc") : configmap "swift-ring-files" not found Nov 26 14:39:10 crc kubenswrapper[5037]: I1126 14:39:10.908570 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-7j79r"] Nov 26 14:39:10 crc kubenswrapper[5037]: I1126 14:39:10.910200 5037 util.go:30] "No sandbox for pod can be found. 
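
The four MountVolume.SetUp failures for "etc-swift" above show durationBeforeRetry doubling on every attempt: 500ms, 1s, 2s, then 4s, each logged as "No retries permitted until ...", until the missing swift-ring-files ConfigMap appears (the swift-ring-rebalance-7j79r job that builds the rings is scheduled immediately after). The sketch below replays that cadence; the 2-minute cap is an assumption for illustration and is not visible in this excerpt.

```go
package main

import (
	"fmt"
	"time"
)

// Replays the retry cadence of the failed "etc-swift" mounts above:
// durationBeforeRetry doubles on every failure (500ms -> 1s -> 2s -> 4s).
// The 2-minute cap is an assumption; it is not visible in this excerpt.
func main() {
	delay := 500 * time.Millisecond
	const maxDelay = 2 * time.Minute
	for attempt := 1; attempt <= 4; attempt++ {
		fmt.Printf("attempt %d failed; no retries permitted for %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
```
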
Need to start a new one" pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:10 crc kubenswrapper[5037]: I1126 14:39:10.912240 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 26 14:39:10 crc kubenswrapper[5037]: I1126 14:39:10.912760 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 26 14:39:10 crc kubenswrapper[5037]: I1126 14:39:10.913027 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 26 14:39:10 crc kubenswrapper[5037]: I1126 14:39:10.928878 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-7j79r"] Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.012442 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-scripts\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.012552 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-swiftconf\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.012571 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-dispersionconf\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.012592 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-etc-swift\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.012632 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sc5sv\" (UniqueName: \"kubernetes.io/projected/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-kube-api-access-sc5sv\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.012739 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-combined-ca-bundle\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.012804 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-ring-data-devices\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 
14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.114460 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-swiftconf\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.114846 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-dispersionconf\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.115072 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-etc-swift\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.115202 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sc5sv\" (UniqueName: \"kubernetes.io/projected/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-kube-api-access-sc5sv\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.115420 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-combined-ca-bundle\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.115568 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-ring-data-devices\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.115696 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-scripts\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.116614 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-etc-swift\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.116911 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-scripts\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.116909 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-ring-data-devices\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.122797 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-swiftconf\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.122974 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-combined-ca-bundle\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.123929 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-dispersionconf\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.148616 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sc5sv\" (UniqueName: \"kubernetes.io/projected/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-kube-api-access-sc5sv\") pod \"swift-ring-rebalance-7j79r\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.273154 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:11 crc kubenswrapper[5037]: I1126 14:39:11.793265 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-7j79r"] Nov 26 14:39:11 crc kubenswrapper[5037]: W1126 14:39:11.795340 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod35b26e94_ffdb_4ee2_9940_efa9d8fd74b8.slice/crio-15bbe51c5842a1d0c6293aa1d46d89ab701c4595c9e01e6804638567b6049c31 WatchSource:0}: Error finding container 15bbe51c5842a1d0c6293aa1d46d89ab701c4595c9e01e6804638567b6049c31: Status 404 returned error can't find the container with id 15bbe51c5842a1d0c6293aa1d46d89ab701c4595c9e01e6804638567b6049c31 Nov 26 14:39:12 crc kubenswrapper[5037]: I1126 14:39:12.301078 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7j79r" event={"ID":"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8","Type":"ContainerStarted","Data":"15bbe51c5842a1d0c6293aa1d46d89ab701c4595c9e01e6804638567b6049c31"} Nov 26 14:39:12 crc kubenswrapper[5037]: I1126 14:39:12.554448 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 26 14:39:12 crc kubenswrapper[5037]: I1126 14:39:12.554497 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 26 14:39:12 crc kubenswrapper[5037]: I1126 14:39:12.681308 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.385097 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.633499 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-c2fe-account-create-update-r7tgg"] Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.635080 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-c2fe-account-create-update-r7tgg" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.640215 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.649111 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-c2fe-account-create-update-r7tgg"] Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.669803 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9f650aa-da8e-4fe4-ab8f-980adc19129a-operator-scripts\") pod \"keystone-c2fe-account-create-update-r7tgg\" (UID: \"a9f650aa-da8e-4fe4-ab8f-980adc19129a\") " pod="openstack/keystone-c2fe-account-create-update-r7tgg" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.670906 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f776q\" (UniqueName: \"kubernetes.io/projected/a9f650aa-da8e-4fe4-ab8f-980adc19129a-kube-api-access-f776q\") pod \"keystone-c2fe-account-create-update-r7tgg\" (UID: \"a9f650aa-da8e-4fe4-ab8f-980adc19129a\") " pod="openstack/keystone-c2fe-account-create-update-r7tgg" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.719031 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-bvnrk"] Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.720491 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-bvnrk" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.731773 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-bvnrk"] Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.772371 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f776q\" (UniqueName: \"kubernetes.io/projected/a9f650aa-da8e-4fe4-ab8f-980adc19129a-kube-api-access-f776q\") pod \"keystone-c2fe-account-create-update-r7tgg\" (UID: \"a9f650aa-da8e-4fe4-ab8f-980adc19129a\") " pod="openstack/keystone-c2fe-account-create-update-r7tgg" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.772687 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xlss\" (UniqueName: \"kubernetes.io/projected/a03d1e71-eb0c-4ec4-8d33-39535460bc50-kube-api-access-8xlss\") pod \"keystone-db-create-bvnrk\" (UID: \"a03d1e71-eb0c-4ec4-8d33-39535460bc50\") " pod="openstack/keystone-db-create-bvnrk" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.772870 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9f650aa-da8e-4fe4-ab8f-980adc19129a-operator-scripts\") pod \"keystone-c2fe-account-create-update-r7tgg\" (UID: \"a9f650aa-da8e-4fe4-ab8f-980adc19129a\") " pod="openstack/keystone-c2fe-account-create-update-r7tgg" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.773168 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a03d1e71-eb0c-4ec4-8d33-39535460bc50-operator-scripts\") pod \"keystone-db-create-bvnrk\" (UID: \"a03d1e71-eb0c-4ec4-8d33-39535460bc50\") " pod="openstack/keystone-db-create-bvnrk" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.773809 5037 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9f650aa-da8e-4fe4-ab8f-980adc19129a-operator-scripts\") pod \"keystone-c2fe-account-create-update-r7tgg\" (UID: \"a9f650aa-da8e-4fe4-ab8f-980adc19129a\") " pod="openstack/keystone-c2fe-account-create-update-r7tgg" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.792131 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f776q\" (UniqueName: \"kubernetes.io/projected/a9f650aa-da8e-4fe4-ab8f-980adc19129a-kube-api-access-f776q\") pod \"keystone-c2fe-account-create-update-r7tgg\" (UID: \"a9f650aa-da8e-4fe4-ab8f-980adc19129a\") " pod="openstack/keystone-c2fe-account-create-update-r7tgg" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.875789 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xlss\" (UniqueName: \"kubernetes.io/projected/a03d1e71-eb0c-4ec4-8d33-39535460bc50-kube-api-access-8xlss\") pod \"keystone-db-create-bvnrk\" (UID: \"a03d1e71-eb0c-4ec4-8d33-39535460bc50\") " pod="openstack/keystone-db-create-bvnrk" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.875947 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a03d1e71-eb0c-4ec4-8d33-39535460bc50-operator-scripts\") pod \"keystone-db-create-bvnrk\" (UID: \"a03d1e71-eb0c-4ec4-8d33-39535460bc50\") " pod="openstack/keystone-db-create-bvnrk" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.878114 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a03d1e71-eb0c-4ec4-8d33-39535460bc50-operator-scripts\") pod \"keystone-db-create-bvnrk\" (UID: \"a03d1e71-eb0c-4ec4-8d33-39535460bc50\") " pod="openstack/keystone-db-create-bvnrk" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.894882 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xlss\" (UniqueName: \"kubernetes.io/projected/a03d1e71-eb0c-4ec4-8d33-39535460bc50-kube-api-access-8xlss\") pod \"keystone-db-create-bvnrk\" (UID: \"a03d1e71-eb0c-4ec4-8d33-39535460bc50\") " pod="openstack/keystone-db-create-bvnrk" Nov 26 14:39:13 crc kubenswrapper[5037]: I1126 14:39:13.969402 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c2fe-account-create-update-r7tgg" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.050781 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-bvnrk" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.075207 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.075447 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.076408 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-nrvrw"] Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.077399 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-nrvrw" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.083812 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-nrvrw"] Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.141664 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-259c-account-create-update-jrh9b"] Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.142661 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-259c-account-create-update-jrh9b" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.144771 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.148123 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-259c-account-create-update-jrh9b"] Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.179757 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.181352 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e421f2b-ccc3-44f2-9646-c51aba1c5706-operator-scripts\") pod \"placement-db-create-nrvrw\" (UID: \"0e421f2b-ccc3-44f2-9646-c51aba1c5706\") " pod="openstack/placement-db-create-nrvrw" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.181603 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hptph\" (UniqueName: \"kubernetes.io/projected/0e421f2b-ccc3-44f2-9646-c51aba1c5706-kube-api-access-hptph\") pod \"placement-db-create-nrvrw\" (UID: \"0e421f2b-ccc3-44f2-9646-c51aba1c5706\") " pod="openstack/placement-db-create-nrvrw" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.181728 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6520b35e-439b-4178-ad5e-9312d57c0fc5-operator-scripts\") pod \"placement-259c-account-create-update-jrh9b\" (UID: \"6520b35e-439b-4178-ad5e-9312d57c0fc5\") " pod="openstack/placement-259c-account-create-update-jrh9b" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.181912 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6946\" (UniqueName: \"kubernetes.io/projected/6520b35e-439b-4178-ad5e-9312d57c0fc5-kube-api-access-m6946\") pod \"placement-259c-account-create-update-jrh9b\" (UID: \"6520b35e-439b-4178-ad5e-9312d57c0fc5\") " pod="openstack/placement-259c-account-create-update-jrh9b" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.283225 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hptph\" (UniqueName: \"kubernetes.io/projected/0e421f2b-ccc3-44f2-9646-c51aba1c5706-kube-api-access-hptph\") pod \"placement-db-create-nrvrw\" (UID: \"0e421f2b-ccc3-44f2-9646-c51aba1c5706\") " pod="openstack/placement-db-create-nrvrw" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.283329 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6520b35e-439b-4178-ad5e-9312d57c0fc5-operator-scripts\") pod \"placement-259c-account-create-update-jrh9b\" (UID: 
\"6520b35e-439b-4178-ad5e-9312d57c0fc5\") " pod="openstack/placement-259c-account-create-update-jrh9b" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.283406 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6946\" (UniqueName: \"kubernetes.io/projected/6520b35e-439b-4178-ad5e-9312d57c0fc5-kube-api-access-m6946\") pod \"placement-259c-account-create-update-jrh9b\" (UID: \"6520b35e-439b-4178-ad5e-9312d57c0fc5\") " pod="openstack/placement-259c-account-create-update-jrh9b" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.283444 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e421f2b-ccc3-44f2-9646-c51aba1c5706-operator-scripts\") pod \"placement-db-create-nrvrw\" (UID: \"0e421f2b-ccc3-44f2-9646-c51aba1c5706\") " pod="openstack/placement-db-create-nrvrw" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.284867 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6520b35e-439b-4178-ad5e-9312d57c0fc5-operator-scripts\") pod \"placement-259c-account-create-update-jrh9b\" (UID: \"6520b35e-439b-4178-ad5e-9312d57c0fc5\") " pod="openstack/placement-259c-account-create-update-jrh9b" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.286085 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e421f2b-ccc3-44f2-9646-c51aba1c5706-operator-scripts\") pod \"placement-db-create-nrvrw\" (UID: \"0e421f2b-ccc3-44f2-9646-c51aba1c5706\") " pod="openstack/placement-db-create-nrvrw" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.318370 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hptph\" (UniqueName: \"kubernetes.io/projected/0e421f2b-ccc3-44f2-9646-c51aba1c5706-kube-api-access-hptph\") pod \"placement-db-create-nrvrw\" (UID: \"0e421f2b-ccc3-44f2-9646-c51aba1c5706\") " pod="openstack/placement-db-create-nrvrw" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.319328 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6946\" (UniqueName: \"kubernetes.io/projected/6520b35e-439b-4178-ad5e-9312d57c0fc5-kube-api-access-m6946\") pod \"placement-259c-account-create-update-jrh9b\" (UID: \"6520b35e-439b-4178-ad5e-9312d57c0fc5\") " pod="openstack/placement-259c-account-create-update-jrh9b" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.346442 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-hsb4f"] Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.347646 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-hsb4f" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.356563 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-hsb4f"] Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.369416 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-4d25-account-create-update-rrt49"] Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.370604 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-4d25-account-create-update-rrt49"] Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.370939 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-4d25-account-create-update-rrt49" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.374044 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.392017 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gr4js\" (UniqueName: \"kubernetes.io/projected/30f3badb-48be-4d24-8f2a-7a3622f0f720-kube-api-access-gr4js\") pod \"glance-db-create-hsb4f\" (UID: \"30f3badb-48be-4d24-8f2a-7a3622f0f720\") " pod="openstack/glance-db-create-hsb4f" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.392116 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h826t\" (UniqueName: \"kubernetes.io/projected/ec74497e-0217-404a-8cb1-510ccc6cba50-kube-api-access-h826t\") pod \"glance-4d25-account-create-update-rrt49\" (UID: \"ec74497e-0217-404a-8cb1-510ccc6cba50\") " pod="openstack/glance-4d25-account-create-update-rrt49" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.392169 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec74497e-0217-404a-8cb1-510ccc6cba50-operator-scripts\") pod \"glance-4d25-account-create-update-rrt49\" (UID: \"ec74497e-0217-404a-8cb1-510ccc6cba50\") " pod="openstack/glance-4d25-account-create-update-rrt49" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.392270 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30f3badb-48be-4d24-8f2a-7a3622f0f720-operator-scripts\") pod \"glance-db-create-hsb4f\" (UID: \"30f3badb-48be-4d24-8f2a-7a3622f0f720\") " pod="openstack/glance-db-create-hsb4f" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.405561 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-nrvrw" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.457377 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.464489 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-259c-account-create-update-jrh9b" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.503163 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h826t\" (UniqueName: \"kubernetes.io/projected/ec74497e-0217-404a-8cb1-510ccc6cba50-kube-api-access-h826t\") pod \"glance-4d25-account-create-update-rrt49\" (UID: \"ec74497e-0217-404a-8cb1-510ccc6cba50\") " pod="openstack/glance-4d25-account-create-update-rrt49" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.503378 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec74497e-0217-404a-8cb1-510ccc6cba50-operator-scripts\") pod \"glance-4d25-account-create-update-rrt49\" (UID: \"ec74497e-0217-404a-8cb1-510ccc6cba50\") " pod="openstack/glance-4d25-account-create-update-rrt49" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.503498 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30f3badb-48be-4d24-8f2a-7a3622f0f720-operator-scripts\") pod \"glance-db-create-hsb4f\" (UID: \"30f3badb-48be-4d24-8f2a-7a3622f0f720\") " pod="openstack/glance-db-create-hsb4f" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.503644 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gr4js\" (UniqueName: \"kubernetes.io/projected/30f3badb-48be-4d24-8f2a-7a3622f0f720-kube-api-access-gr4js\") pod \"glance-db-create-hsb4f\" (UID: \"30f3badb-48be-4d24-8f2a-7a3622f0f720\") " pod="openstack/glance-db-create-hsb4f" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.506924 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30f3badb-48be-4d24-8f2a-7a3622f0f720-operator-scripts\") pod \"glance-db-create-hsb4f\" (UID: \"30f3badb-48be-4d24-8f2a-7a3622f0f720\") " pod="openstack/glance-db-create-hsb4f" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.513571 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec74497e-0217-404a-8cb1-510ccc6cba50-operator-scripts\") pod \"glance-4d25-account-create-update-rrt49\" (UID: \"ec74497e-0217-404a-8cb1-510ccc6cba50\") " pod="openstack/glance-4d25-account-create-update-rrt49" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.521653 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h826t\" (UniqueName: \"kubernetes.io/projected/ec74497e-0217-404a-8cb1-510ccc6cba50-kube-api-access-h826t\") pod \"glance-4d25-account-create-update-rrt49\" (UID: \"ec74497e-0217-404a-8cb1-510ccc6cba50\") " pod="openstack/glance-4d25-account-create-update-rrt49" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.522099 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gr4js\" (UniqueName: \"kubernetes.io/projected/30f3badb-48be-4d24-8f2a-7a3622f0f720-kube-api-access-gr4js\") pod \"glance-db-create-hsb4f\" (UID: \"30f3badb-48be-4d24-8f2a-7a3622f0f720\") " pod="openstack/glance-db-create-hsb4f" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.722627 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-hsb4f" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.746451 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-4d25-account-create-update-rrt49" Nov 26 14:39:14 crc kubenswrapper[5037]: I1126 14:39:14.807690 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:14 crc kubenswrapper[5037]: E1126 14:39:14.807890 5037 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 14:39:14 crc kubenswrapper[5037]: E1126 14:39:14.807910 5037 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 14:39:14 crc kubenswrapper[5037]: E1126 14:39:14.807968 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift podName:10886f85-c800-4999-8c79-c490c60696cc nodeName:}" failed. No retries permitted until 2025-11-26 14:39:22.807946987 +0000 UTC m=+1429.604717171 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift") pod "swift-storage-0" (UID: "10886f85-c800-4999-8c79-c490c60696cc") : configmap "swift-ring-files" not found Nov 26 14:39:16 crc kubenswrapper[5037]: I1126 14:39:16.196560 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:39:16 crc kubenswrapper[5037]: I1126 14:39:16.264942 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-gc7pm"] Nov 26 14:39:16 crc kubenswrapper[5037]: I1126 14:39:16.271457 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" podUID="073bdcf3-16fa-4b27-8a82-709bf0e1bf1c" containerName="dnsmasq-dns" containerID="cri-o://ea1009d7d3d459178be36a42c970086c0297f9bb2bdecc2bd997fc30c8d12a06" gracePeriod=10 Nov 26 14:39:16 crc kubenswrapper[5037]: I1126 14:39:16.386635 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 26 14:39:16 crc kubenswrapper[5037]: I1126 14:39:16.791952 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:39:16 crc kubenswrapper[5037]: I1126 14:39:16.950724 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-dns-svc\") pod \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\" (UID: \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\") " Nov 26 14:39:16 crc kubenswrapper[5037]: I1126 14:39:16.950824 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-config\") pod \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\" (UID: \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\") " Nov 26 14:39:16 crc kubenswrapper[5037]: I1126 14:39:16.950924 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ns68w\" (UniqueName: \"kubernetes.io/projected/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-kube-api-access-ns68w\") pod \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\" (UID: \"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c\") " Nov 26 14:39:16 crc kubenswrapper[5037]: I1126 14:39:16.957597 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-kube-api-access-ns68w" (OuterVolumeSpecName: "kube-api-access-ns68w") pod "073bdcf3-16fa-4b27-8a82-709bf0e1bf1c" (UID: "073bdcf3-16fa-4b27-8a82-709bf0e1bf1c"). InnerVolumeSpecName "kube-api-access-ns68w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:16 crc kubenswrapper[5037]: I1126 14:39:16.989534 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "073bdcf3-16fa-4b27-8a82-709bf0e1bf1c" (UID: "073bdcf3-16fa-4b27-8a82-709bf0e1bf1c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.003029 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-config" (OuterVolumeSpecName: "config") pod "073bdcf3-16fa-4b27-8a82-709bf0e1bf1c" (UID: "073bdcf3-16fa-4b27-8a82-709bf0e1bf1c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.053684 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.053723 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.053735 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ns68w\" (UniqueName: \"kubernetes.io/projected/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c-kube-api-access-ns68w\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.125074 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-259c-account-create-update-jrh9b"] Nov 26 14:39:17 crc kubenswrapper[5037]: W1126 14:39:17.152969 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod30f3badb_48be_4d24_8f2a_7a3622f0f720.slice/crio-2095b8705d45333d3f925e71578f05464d5ad80f94f2983498569b0f2b9daedd WatchSource:0}: Error finding container 2095b8705d45333d3f925e71578f05464d5ad80f94f2983498569b0f2b9daedd: Status 404 returned error can't find the container with id 2095b8705d45333d3f925e71578f05464d5ad80f94f2983498569b0f2b9daedd Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.153890 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-4d25-account-create-update-rrt49"] Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.162666 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-hsb4f"] Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.169666 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-nrvrw"] Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.253088 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-bvnrk"] Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.285200 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-c2fe-account-create-update-r7tgg"] Nov 26 14:39:17 crc kubenswrapper[5037]: W1126 14:39:17.313753 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9f650aa_da8e_4fe4_ab8f_980adc19129a.slice/crio-6c9a70abe692576794fd6e20c12672fcea695e95fce0700886e8f089e4443b90 WatchSource:0}: Error finding container 6c9a70abe692576794fd6e20c12672fcea695e95fce0700886e8f089e4443b90: Status 404 returned error can't find the container with id 6c9a70abe692576794fd6e20c12672fcea695e95fce0700886e8f089e4443b90 Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.361585 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-nrvrw" event={"ID":"0e421f2b-ccc3-44f2-9646-c51aba1c5706","Type":"ContainerStarted","Data":"19003440a1bd9cf39c6951008fca27cfddc7559bb73a4ca239783df519f973be"} Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.364211 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7j79r" 
event={"ID":"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8","Type":"ContainerStarted","Data":"cacc6b734cc7e2bff01ef129c3b33307509445bfd203135818102bd120e021b4"} Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.368184 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c2fe-account-create-update-r7tgg" event={"ID":"a9f650aa-da8e-4fe4-ab8f-980adc19129a","Type":"ContainerStarted","Data":"6c9a70abe692576794fd6e20c12672fcea695e95fce0700886e8f089e4443b90"} Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.373617 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-bvnrk" event={"ID":"a03d1e71-eb0c-4ec4-8d33-39535460bc50","Type":"ContainerStarted","Data":"fbfaeeba37aa51ace24fe8ea2ee6535dba2af75cfbeb785b67f1128bcef51c0c"} Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.389140 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-7j79r" podStartSLOduration=2.576606136 podStartE2EDuration="7.389124124s" podCreationTimestamp="2025-11-26 14:39:10 +0000 UTC" firstStartedPulling="2025-11-26 14:39:11.800018451 +0000 UTC m=+1418.596788645" lastFinishedPulling="2025-11-26 14:39:16.612536449 +0000 UTC m=+1423.409306633" observedRunningTime="2025-11-26 14:39:17.384761878 +0000 UTC m=+1424.181532072" watchObservedRunningTime="2025-11-26 14:39:17.389124124 +0000 UTC m=+1424.185894308" Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.399975 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-259c-account-create-update-jrh9b" event={"ID":"6520b35e-439b-4178-ad5e-9312d57c0fc5","Type":"ContainerStarted","Data":"4eb15cb32038efea094cfdd9de7d738b4b794156617ac3711e9211ae2164489c"} Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.400028 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-259c-account-create-update-jrh9b" event={"ID":"6520b35e-439b-4178-ad5e-9312d57c0fc5","Type":"ContainerStarted","Data":"026de11f1e366c1c3954db8d96e380a8442f9e41217aa23a07881785b2c20ec7"} Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.407813 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4d25-account-create-update-rrt49" event={"ID":"ec74497e-0217-404a-8cb1-510ccc6cba50","Type":"ContainerStarted","Data":"c7ef44af785d4822b364555ca5319d8f16a3d132aebbe2af46dca45d53b91cf0"} Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.415743 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-hsb4f" event={"ID":"30f3badb-48be-4d24-8f2a-7a3622f0f720","Type":"ContainerStarted","Data":"2095b8705d45333d3f925e71578f05464d5ad80f94f2983498569b0f2b9daedd"} Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.418213 5037 generic.go:334] "Generic (PLEG): container finished" podID="073bdcf3-16fa-4b27-8a82-709bf0e1bf1c" containerID="ea1009d7d3d459178be36a42c970086c0297f9bb2bdecc2bd997fc30c8d12a06" exitCode=0 Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.418246 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" event={"ID":"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c","Type":"ContainerDied","Data":"ea1009d7d3d459178be36a42c970086c0297f9bb2bdecc2bd997fc30c8d12a06"} Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.418261 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" 
event={"ID":"073bdcf3-16fa-4b27-8a82-709bf0e1bf1c","Type":"ContainerDied","Data":"5a4dcf11e537568bf4494020f248d1d7b9f7c7f4b619cdf8bbc676287241731c"} Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.418299 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-gc7pm" Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.418279 5037 scope.go:117] "RemoveContainer" containerID="ea1009d7d3d459178be36a42c970086c0297f9bb2bdecc2bd997fc30c8d12a06" Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.463679 5037 scope.go:117] "RemoveContainer" containerID="845a045204cfcb44bec0285e07c1da985f5f5b2cce872d3a31832ee3ec411ff4" Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.532383 5037 scope.go:117] "RemoveContainer" containerID="ea1009d7d3d459178be36a42c970086c0297f9bb2bdecc2bd997fc30c8d12a06" Nov 26 14:39:17 crc kubenswrapper[5037]: E1126 14:39:17.532862 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea1009d7d3d459178be36a42c970086c0297f9bb2bdecc2bd997fc30c8d12a06\": container with ID starting with ea1009d7d3d459178be36a42c970086c0297f9bb2bdecc2bd997fc30c8d12a06 not found: ID does not exist" containerID="ea1009d7d3d459178be36a42c970086c0297f9bb2bdecc2bd997fc30c8d12a06" Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.532899 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea1009d7d3d459178be36a42c970086c0297f9bb2bdecc2bd997fc30c8d12a06"} err="failed to get container status \"ea1009d7d3d459178be36a42c970086c0297f9bb2bdecc2bd997fc30c8d12a06\": rpc error: code = NotFound desc = could not find container \"ea1009d7d3d459178be36a42c970086c0297f9bb2bdecc2bd997fc30c8d12a06\": container with ID starting with ea1009d7d3d459178be36a42c970086c0297f9bb2bdecc2bd997fc30c8d12a06 not found: ID does not exist" Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.532926 5037 scope.go:117] "RemoveContainer" containerID="845a045204cfcb44bec0285e07c1da985f5f5b2cce872d3a31832ee3ec411ff4" Nov 26 14:39:17 crc kubenswrapper[5037]: E1126 14:39:17.533361 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"845a045204cfcb44bec0285e07c1da985f5f5b2cce872d3a31832ee3ec411ff4\": container with ID starting with 845a045204cfcb44bec0285e07c1da985f5f5b2cce872d3a31832ee3ec411ff4 not found: ID does not exist" containerID="845a045204cfcb44bec0285e07c1da985f5f5b2cce872d3a31832ee3ec411ff4" Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.533417 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"845a045204cfcb44bec0285e07c1da985f5f5b2cce872d3a31832ee3ec411ff4"} err="failed to get container status \"845a045204cfcb44bec0285e07c1da985f5f5b2cce872d3a31832ee3ec411ff4\": rpc error: code = NotFound desc = could not find container \"845a045204cfcb44bec0285e07c1da985f5f5b2cce872d3a31832ee3ec411ff4\": container with ID starting with 845a045204cfcb44bec0285e07c1da985f5f5b2cce872d3a31832ee3ec411ff4 not found: ID does not exist" Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.535762 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-gc7pm"] Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 14:39:17.542952 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-gc7pm"] Nov 26 14:39:17 crc kubenswrapper[5037]: I1126 
14:39:17.918437 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="073bdcf3-16fa-4b27-8a82-709bf0e1bf1c" path="/var/lib/kubelet/pods/073bdcf3-16fa-4b27-8a82-709bf0e1bf1c/volumes" Nov 26 14:39:18 crc kubenswrapper[5037]: I1126 14:39:18.429480 5037 generic.go:334] "Generic (PLEG): container finished" podID="30f3badb-48be-4d24-8f2a-7a3622f0f720" containerID="77c7c1aa98cd388dde079bf390d4f95410a791cc036f4afe9dfb28d8e40a7d3a" exitCode=0 Nov 26 14:39:18 crc kubenswrapper[5037]: I1126 14:39:18.429571 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-hsb4f" event={"ID":"30f3badb-48be-4d24-8f2a-7a3622f0f720","Type":"ContainerDied","Data":"77c7c1aa98cd388dde079bf390d4f95410a791cc036f4afe9dfb28d8e40a7d3a"} Nov 26 14:39:18 crc kubenswrapper[5037]: I1126 14:39:18.443396 5037 generic.go:334] "Generic (PLEG): container finished" podID="0e421f2b-ccc3-44f2-9646-c51aba1c5706" containerID="462372bb6dc3a0113047a1731960cccabf72571beca4065fd8e4365e72f78a6d" exitCode=0 Nov 26 14:39:18 crc kubenswrapper[5037]: I1126 14:39:18.443457 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-nrvrw" event={"ID":"0e421f2b-ccc3-44f2-9646-c51aba1c5706","Type":"ContainerDied","Data":"462372bb6dc3a0113047a1731960cccabf72571beca4065fd8e4365e72f78a6d"} Nov 26 14:39:18 crc kubenswrapper[5037]: I1126 14:39:18.448272 5037 generic.go:334] "Generic (PLEG): container finished" podID="a9f650aa-da8e-4fe4-ab8f-980adc19129a" containerID="880247269d69779bbf9a952eff9c17aaddc108b220a076adf10b12d9e8d111eb" exitCode=0 Nov 26 14:39:18 crc kubenswrapper[5037]: I1126 14:39:18.448376 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c2fe-account-create-update-r7tgg" event={"ID":"a9f650aa-da8e-4fe4-ab8f-980adc19129a","Type":"ContainerDied","Data":"880247269d69779bbf9a952eff9c17aaddc108b220a076adf10b12d9e8d111eb"} Nov 26 14:39:18 crc kubenswrapper[5037]: I1126 14:39:18.459738 5037 generic.go:334] "Generic (PLEG): container finished" podID="a03d1e71-eb0c-4ec4-8d33-39535460bc50" containerID="21def421ecbae44cdacf0b7d1286303125364d4d05b4ade8922e55bf4ed25ba9" exitCode=0 Nov 26 14:39:18 crc kubenswrapper[5037]: I1126 14:39:18.459861 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-bvnrk" event={"ID":"a03d1e71-eb0c-4ec4-8d33-39535460bc50","Type":"ContainerDied","Data":"21def421ecbae44cdacf0b7d1286303125364d4d05b4ade8922e55bf4ed25ba9"} Nov 26 14:39:18 crc kubenswrapper[5037]: I1126 14:39:18.462187 5037 generic.go:334] "Generic (PLEG): container finished" podID="6520b35e-439b-4178-ad5e-9312d57c0fc5" containerID="4eb15cb32038efea094cfdd9de7d738b4b794156617ac3711e9211ae2164489c" exitCode=0 Nov 26 14:39:18 crc kubenswrapper[5037]: I1126 14:39:18.462267 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-259c-account-create-update-jrh9b" event={"ID":"6520b35e-439b-4178-ad5e-9312d57c0fc5","Type":"ContainerDied","Data":"4eb15cb32038efea094cfdd9de7d738b4b794156617ac3711e9211ae2164489c"} Nov 26 14:39:18 crc kubenswrapper[5037]: I1126 14:39:18.464056 5037 generic.go:334] "Generic (PLEG): container finished" podID="ec74497e-0217-404a-8cb1-510ccc6cba50" containerID="7dd90ada4d32cc3f0045ed892e7616d50b4fe26936dc57ecbadc1f8f2b7f6564" exitCode=0 Nov 26 14:39:18 crc kubenswrapper[5037]: I1126 14:39:18.464105 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4d25-account-create-update-rrt49" 
event={"ID":"ec74497e-0217-404a-8cb1-510ccc6cba50","Type":"ContainerDied","Data":"7dd90ada4d32cc3f0045ed892e7616d50b4fe26936dc57ecbadc1f8f2b7f6564"} Nov 26 14:39:19 crc kubenswrapper[5037]: I1126 14:39:19.894055 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-nrvrw" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.011994 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hptph\" (UniqueName: \"kubernetes.io/projected/0e421f2b-ccc3-44f2-9646-c51aba1c5706-kube-api-access-hptph\") pod \"0e421f2b-ccc3-44f2-9646-c51aba1c5706\" (UID: \"0e421f2b-ccc3-44f2-9646-c51aba1c5706\") " Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.012128 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e421f2b-ccc3-44f2-9646-c51aba1c5706-operator-scripts\") pod \"0e421f2b-ccc3-44f2-9646-c51aba1c5706\" (UID: \"0e421f2b-ccc3-44f2-9646-c51aba1c5706\") " Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.012630 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e421f2b-ccc3-44f2-9646-c51aba1c5706-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0e421f2b-ccc3-44f2-9646-c51aba1c5706" (UID: "0e421f2b-ccc3-44f2-9646-c51aba1c5706"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.013556 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e421f2b-ccc3-44f2-9646-c51aba1c5706-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.016414 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e421f2b-ccc3-44f2-9646-c51aba1c5706-kube-api-access-hptph" (OuterVolumeSpecName: "kube-api-access-hptph") pod "0e421f2b-ccc3-44f2-9646-c51aba1c5706" (UID: "0e421f2b-ccc3-44f2-9646-c51aba1c5706"). InnerVolumeSpecName "kube-api-access-hptph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.093019 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-bvnrk" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.099403 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-4d25-account-create-update-rrt49" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.108082 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-hsb4f" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.121363 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hptph\" (UniqueName: \"kubernetes.io/projected/0e421f2b-ccc3-44f2-9646-c51aba1c5706-kube-api-access-hptph\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.130913 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-259c-account-create-update-jrh9b" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.133732 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-c2fe-account-create-update-r7tgg" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.222728 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xlss\" (UniqueName: \"kubernetes.io/projected/a03d1e71-eb0c-4ec4-8d33-39535460bc50-kube-api-access-8xlss\") pod \"a03d1e71-eb0c-4ec4-8d33-39535460bc50\" (UID: \"a03d1e71-eb0c-4ec4-8d33-39535460bc50\") " Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.222823 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h826t\" (UniqueName: \"kubernetes.io/projected/ec74497e-0217-404a-8cb1-510ccc6cba50-kube-api-access-h826t\") pod \"ec74497e-0217-404a-8cb1-510ccc6cba50\" (UID: \"ec74497e-0217-404a-8cb1-510ccc6cba50\") " Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.222855 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gr4js\" (UniqueName: \"kubernetes.io/projected/30f3badb-48be-4d24-8f2a-7a3622f0f720-kube-api-access-gr4js\") pod \"30f3badb-48be-4d24-8f2a-7a3622f0f720\" (UID: \"30f3badb-48be-4d24-8f2a-7a3622f0f720\") " Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.222896 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec74497e-0217-404a-8cb1-510ccc6cba50-operator-scripts\") pod \"ec74497e-0217-404a-8cb1-510ccc6cba50\" (UID: \"ec74497e-0217-404a-8cb1-510ccc6cba50\") " Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.222961 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6946\" (UniqueName: \"kubernetes.io/projected/6520b35e-439b-4178-ad5e-9312d57c0fc5-kube-api-access-m6946\") pod \"6520b35e-439b-4178-ad5e-9312d57c0fc5\" (UID: \"6520b35e-439b-4178-ad5e-9312d57c0fc5\") " Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.222997 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6520b35e-439b-4178-ad5e-9312d57c0fc5-operator-scripts\") pod \"6520b35e-439b-4178-ad5e-9312d57c0fc5\" (UID: \"6520b35e-439b-4178-ad5e-9312d57c0fc5\") " Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.223060 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30f3badb-48be-4d24-8f2a-7a3622f0f720-operator-scripts\") pod \"30f3badb-48be-4d24-8f2a-7a3622f0f720\" (UID: \"30f3badb-48be-4d24-8f2a-7a3622f0f720\") " Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.223092 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a03d1e71-eb0c-4ec4-8d33-39535460bc50-operator-scripts\") pod \"a03d1e71-eb0c-4ec4-8d33-39535460bc50\" (UID: \"a03d1e71-eb0c-4ec4-8d33-39535460bc50\") " Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.223831 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6520b35e-439b-4178-ad5e-9312d57c0fc5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6520b35e-439b-4178-ad5e-9312d57c0fc5" (UID: "6520b35e-439b-4178-ad5e-9312d57c0fc5"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.223583 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec74497e-0217-404a-8cb1-510ccc6cba50-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ec74497e-0217-404a-8cb1-510ccc6cba50" (UID: "ec74497e-0217-404a-8cb1-510ccc6cba50"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.223993 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a03d1e71-eb0c-4ec4-8d33-39535460bc50-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a03d1e71-eb0c-4ec4-8d33-39535460bc50" (UID: "a03d1e71-eb0c-4ec4-8d33-39535460bc50"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.224347 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30f3badb-48be-4d24-8f2a-7a3622f0f720-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "30f3badb-48be-4d24-8f2a-7a3622f0f720" (UID: "30f3badb-48be-4d24-8f2a-7a3622f0f720"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.226860 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6520b35e-439b-4178-ad5e-9312d57c0fc5-kube-api-access-m6946" (OuterVolumeSpecName: "kube-api-access-m6946") pod "6520b35e-439b-4178-ad5e-9312d57c0fc5" (UID: "6520b35e-439b-4178-ad5e-9312d57c0fc5"). InnerVolumeSpecName "kube-api-access-m6946". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.228392 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a03d1e71-eb0c-4ec4-8d33-39535460bc50-kube-api-access-8xlss" (OuterVolumeSpecName: "kube-api-access-8xlss") pod "a03d1e71-eb0c-4ec4-8d33-39535460bc50" (UID: "a03d1e71-eb0c-4ec4-8d33-39535460bc50"). InnerVolumeSpecName "kube-api-access-8xlss". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.229487 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30f3badb-48be-4d24-8f2a-7a3622f0f720-kube-api-access-gr4js" (OuterVolumeSpecName: "kube-api-access-gr4js") pod "30f3badb-48be-4d24-8f2a-7a3622f0f720" (UID: "30f3badb-48be-4d24-8f2a-7a3622f0f720"). InnerVolumeSpecName "kube-api-access-gr4js". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.229578 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec74497e-0217-404a-8cb1-510ccc6cba50-kube-api-access-h826t" (OuterVolumeSpecName: "kube-api-access-h826t") pod "ec74497e-0217-404a-8cb1-510ccc6cba50" (UID: "ec74497e-0217-404a-8cb1-510ccc6cba50"). InnerVolumeSpecName "kube-api-access-h826t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.324958 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f776q\" (UniqueName: \"kubernetes.io/projected/a9f650aa-da8e-4fe4-ab8f-980adc19129a-kube-api-access-f776q\") pod \"a9f650aa-da8e-4fe4-ab8f-980adc19129a\" (UID: \"a9f650aa-da8e-4fe4-ab8f-980adc19129a\") " Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.325136 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9f650aa-da8e-4fe4-ab8f-980adc19129a-operator-scripts\") pod \"a9f650aa-da8e-4fe4-ab8f-980adc19129a\" (UID: \"a9f650aa-da8e-4fe4-ab8f-980adc19129a\") " Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.325593 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30f3badb-48be-4d24-8f2a-7a3622f0f720-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.325622 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a03d1e71-eb0c-4ec4-8d33-39535460bc50-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.325641 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xlss\" (UniqueName: \"kubernetes.io/projected/a03d1e71-eb0c-4ec4-8d33-39535460bc50-kube-api-access-8xlss\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.325656 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h826t\" (UniqueName: \"kubernetes.io/projected/ec74497e-0217-404a-8cb1-510ccc6cba50-kube-api-access-h826t\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.325667 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gr4js\" (UniqueName: \"kubernetes.io/projected/30f3badb-48be-4d24-8f2a-7a3622f0f720-kube-api-access-gr4js\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.325679 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec74497e-0217-404a-8cb1-510ccc6cba50-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.325677 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9f650aa-da8e-4fe4-ab8f-980adc19129a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a9f650aa-da8e-4fe4-ab8f-980adc19129a" (UID: "a9f650aa-da8e-4fe4-ab8f-980adc19129a"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.325690 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6946\" (UniqueName: \"kubernetes.io/projected/6520b35e-439b-4178-ad5e-9312d57c0fc5-kube-api-access-m6946\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.325740 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6520b35e-439b-4178-ad5e-9312d57c0fc5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.327935 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9f650aa-da8e-4fe4-ab8f-980adc19129a-kube-api-access-f776q" (OuterVolumeSpecName: "kube-api-access-f776q") pod "a9f650aa-da8e-4fe4-ab8f-980adc19129a" (UID: "a9f650aa-da8e-4fe4-ab8f-980adc19129a"). InnerVolumeSpecName "kube-api-access-f776q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.427255 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9f650aa-da8e-4fe4-ab8f-980adc19129a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.427309 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f776q\" (UniqueName: \"kubernetes.io/projected/a9f650aa-da8e-4fe4-ab8f-980adc19129a-kube-api-access-f776q\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.482340 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c2fe-account-create-update-r7tgg" event={"ID":"a9f650aa-da8e-4fe4-ab8f-980adc19129a","Type":"ContainerDied","Data":"6c9a70abe692576794fd6e20c12672fcea695e95fce0700886e8f089e4443b90"} Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.482455 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c9a70abe692576794fd6e20c12672fcea695e95fce0700886e8f089e4443b90" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.482418 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c2fe-account-create-update-r7tgg" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.484751 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-bvnrk" event={"ID":"a03d1e71-eb0c-4ec4-8d33-39535460bc50","Type":"ContainerDied","Data":"fbfaeeba37aa51ace24fe8ea2ee6535dba2af75cfbeb785b67f1128bcef51c0c"} Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.484810 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fbfaeeba37aa51ace24fe8ea2ee6535dba2af75cfbeb785b67f1128bcef51c0c" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.484899 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-bvnrk" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.494389 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-259c-account-create-update-jrh9b" event={"ID":"6520b35e-439b-4178-ad5e-9312d57c0fc5","Type":"ContainerDied","Data":"026de11f1e366c1c3954db8d96e380a8442f9e41217aa23a07881785b2c20ec7"} Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.494421 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="026de11f1e366c1c3954db8d96e380a8442f9e41217aa23a07881785b2c20ec7" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.494471 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-259c-account-create-update-jrh9b" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.497635 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-4d25-account-create-update-rrt49" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.497672 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4d25-account-create-update-rrt49" event={"ID":"ec74497e-0217-404a-8cb1-510ccc6cba50","Type":"ContainerDied","Data":"c7ef44af785d4822b364555ca5319d8f16a3d132aebbe2af46dca45d53b91cf0"} Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.497994 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c7ef44af785d4822b364555ca5319d8f16a3d132aebbe2af46dca45d53b91cf0" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.500103 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-hsb4f" event={"ID":"30f3badb-48be-4d24-8f2a-7a3622f0f720","Type":"ContainerDied","Data":"2095b8705d45333d3f925e71578f05464d5ad80f94f2983498569b0f2b9daedd"} Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.500143 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2095b8705d45333d3f925e71578f05464d5ad80f94f2983498569b0f2b9daedd" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.500195 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-hsb4f" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.509885 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-nrvrw" event={"ID":"0e421f2b-ccc3-44f2-9646-c51aba1c5706","Type":"ContainerDied","Data":"19003440a1bd9cf39c6951008fca27cfddc7559bb73a4ca239783df519f973be"} Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.509943 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19003440a1bd9cf39c6951008fca27cfddc7559bb73a4ca239783df519f973be" Nov 26 14:39:20 crc kubenswrapper[5037]: I1126 14:39:20.509986 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-nrvrw" Nov 26 14:39:22 crc kubenswrapper[5037]: I1126 14:39:22.870783 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:22 crc kubenswrapper[5037]: E1126 14:39:22.871079 5037 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 14:39:22 crc kubenswrapper[5037]: E1126 14:39:22.871118 5037 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 14:39:22 crc kubenswrapper[5037]: E1126 14:39:22.871212 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift podName:10886f85-c800-4999-8c79-c490c60696cc nodeName:}" failed. No retries permitted until 2025-11-26 14:39:38.871183707 +0000 UTC m=+1445.667953931 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift") pod "swift-storage-0" (UID: "10886f85-c800-4999-8c79-c490c60696cc") : configmap "swift-ring-files" not found Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.544853 5037 generic.go:334] "Generic (PLEG): container finished" podID="35b26e94-ffdb-4ee2-9940-efa9d8fd74b8" containerID="cacc6b734cc7e2bff01ef129c3b33307509445bfd203135818102bd120e021b4" exitCode=0 Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.544913 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7j79r" event={"ID":"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8","Type":"ContainerDied","Data":"cacc6b734cc7e2bff01ef129c3b33307509445bfd203135818102bd120e021b4"} Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.571955 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-mgn9v"] Nov 26 14:39:24 crc kubenswrapper[5037]: E1126 14:39:24.572299 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9f650aa-da8e-4fe4-ab8f-980adc19129a" containerName="mariadb-account-create-update" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572315 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9f650aa-da8e-4fe4-ab8f-980adc19129a" containerName="mariadb-account-create-update" Nov 26 14:39:24 crc kubenswrapper[5037]: E1126 14:39:24.572324 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30f3badb-48be-4d24-8f2a-7a3622f0f720" containerName="mariadb-database-create" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572332 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="30f3badb-48be-4d24-8f2a-7a3622f0f720" containerName="mariadb-database-create" Nov 26 14:39:24 crc kubenswrapper[5037]: E1126 14:39:24.572351 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e421f2b-ccc3-44f2-9646-c51aba1c5706" containerName="mariadb-database-create" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572357 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e421f2b-ccc3-44f2-9646-c51aba1c5706" containerName="mariadb-database-create" Nov 26 14:39:24 crc kubenswrapper[5037]: E1126 14:39:24.572364 5037 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="ec74497e-0217-404a-8cb1-510ccc6cba50" containerName="mariadb-account-create-update" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572369 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec74497e-0217-404a-8cb1-510ccc6cba50" containerName="mariadb-account-create-update" Nov 26 14:39:24 crc kubenswrapper[5037]: E1126 14:39:24.572388 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a03d1e71-eb0c-4ec4-8d33-39535460bc50" containerName="mariadb-database-create" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572393 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="a03d1e71-eb0c-4ec4-8d33-39535460bc50" containerName="mariadb-database-create" Nov 26 14:39:24 crc kubenswrapper[5037]: E1126 14:39:24.572406 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="073bdcf3-16fa-4b27-8a82-709bf0e1bf1c" containerName="init" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572412 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="073bdcf3-16fa-4b27-8a82-709bf0e1bf1c" containerName="init" Nov 26 14:39:24 crc kubenswrapper[5037]: E1126 14:39:24.572422 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="073bdcf3-16fa-4b27-8a82-709bf0e1bf1c" containerName="dnsmasq-dns" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572428 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="073bdcf3-16fa-4b27-8a82-709bf0e1bf1c" containerName="dnsmasq-dns" Nov 26 14:39:24 crc kubenswrapper[5037]: E1126 14:39:24.572436 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6520b35e-439b-4178-ad5e-9312d57c0fc5" containerName="mariadb-account-create-update" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572444 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="6520b35e-439b-4178-ad5e-9312d57c0fc5" containerName="mariadb-account-create-update" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572604 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="073bdcf3-16fa-4b27-8a82-709bf0e1bf1c" containerName="dnsmasq-dns" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572634 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9f650aa-da8e-4fe4-ab8f-980adc19129a" containerName="mariadb-account-create-update" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572648 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="6520b35e-439b-4178-ad5e-9312d57c0fc5" containerName="mariadb-account-create-update" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572667 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec74497e-0217-404a-8cb1-510ccc6cba50" containerName="mariadb-account-create-update" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572675 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="a03d1e71-eb0c-4ec4-8d33-39535460bc50" containerName="mariadb-database-create" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572692 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="30f3badb-48be-4d24-8f2a-7a3622f0f720" containerName="mariadb-database-create" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.572700 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e421f2b-ccc3-44f2-9646-c51aba1c5706" containerName="mariadb-database-create" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.573230 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.575859 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.576020 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-6flkn" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.607609 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnp75\" (UniqueName: \"kubernetes.io/projected/2369115c-ae08-42b0-af64-c42191c04502-kube-api-access-vnp75\") pod \"glance-db-sync-mgn9v\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.607722 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-db-sync-config-data\") pod \"glance-db-sync-mgn9v\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.607770 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-config-data\") pod \"glance-db-sync-mgn9v\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.607807 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-combined-ca-bundle\") pod \"glance-db-sync-mgn9v\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.608591 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-mgn9v"] Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.708744 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-db-sync-config-data\") pod \"glance-db-sync-mgn9v\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.708821 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-config-data\") pod \"glance-db-sync-mgn9v\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.708861 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-combined-ca-bundle\") pod \"glance-db-sync-mgn9v\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.709047 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnp75\" (UniqueName: \"kubernetes.io/projected/2369115c-ae08-42b0-af64-c42191c04502-kube-api-access-vnp75\") pod 
\"glance-db-sync-mgn9v\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.718248 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-db-sync-config-data\") pod \"glance-db-sync-mgn9v\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.719806 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-combined-ca-bundle\") pod \"glance-db-sync-mgn9v\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.739599 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnp75\" (UniqueName: \"kubernetes.io/projected/2369115c-ae08-42b0-af64-c42191c04502-kube-api-access-vnp75\") pod \"glance-db-sync-mgn9v\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.742740 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-config-data\") pod \"glance-db-sync-mgn9v\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:24 crc kubenswrapper[5037]: I1126 14:39:24.898690 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-mgn9v" Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.414145 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-mgn9v"] Nov 26 14:39:25 crc kubenswrapper[5037]: W1126 14:39:25.421524 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2369115c_ae08_42b0_af64_c42191c04502.slice/crio-b5df84b55d88403cf2b629f4b47dc681b4f9e39ad9f86bafa991315acb3bca72 WatchSource:0}: Error finding container b5df84b55d88403cf2b629f4b47dc681b4f9e39ad9f86bafa991315acb3bca72: Status 404 returned error can't find the container with id b5df84b55d88403cf2b629f4b47dc681b4f9e39ad9f86bafa991315acb3bca72 Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.552668 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-mgn9v" event={"ID":"2369115c-ae08-42b0-af64-c42191c04502","Type":"ContainerStarted","Data":"b5df84b55d88403cf2b629f4b47dc681b4f9e39ad9f86bafa991315acb3bca72"} Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.826648 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-ptz2q" podUID="6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" containerName="ovn-controller" probeResult="failure" output=< Nov 26 14:39:25 crc kubenswrapper[5037]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 14:39:25 crc kubenswrapper[5037]: > Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.832258 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.843051 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-264cs" Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.867419 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-264cs" Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.927895 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-combined-ca-bundle\") pod \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.927929 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-etc-swift\") pod \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.927947 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-ring-data-devices\") pod \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.927983 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-dispersionconf\") pod \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.928008 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sc5sv\" (UniqueName: \"kubernetes.io/projected/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-kube-api-access-sc5sv\") pod \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.928044 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-swiftconf\") pod \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.928061 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-scripts\") pod \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\" (UID: \"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8\") " Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.929735 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8" (UID: "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.930189 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8" (UID: "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.936591 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8" (UID: "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.938063 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-kube-api-access-sc5sv" (OuterVolumeSpecName: "kube-api-access-sc5sv") pod "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8" (UID: "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8"). InnerVolumeSpecName "kube-api-access-sc5sv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.953171 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8" (UID: "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.955238 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8" (UID: "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:39:25 crc kubenswrapper[5037]: I1126 14:39:25.955850 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-scripts" (OuterVolumeSpecName: "scripts") pod "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8" (UID: "35b26e94-ffdb-4ee2-9940-efa9d8fd74b8"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.029748 5037 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.029788 5037 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.029801 5037 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.029812 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sc5sv\" (UniqueName: \"kubernetes.io/projected/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-kube-api-access-sc5sv\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.029825 5037 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.029837 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.029848 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.075112 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ptz2q-config-gx4bv"] Nov 26 14:39:26 crc kubenswrapper[5037]: E1126 14:39:26.075572 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35b26e94-ffdb-4ee2-9940-efa9d8fd74b8" containerName="swift-ring-rebalance" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.075594 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="35b26e94-ffdb-4ee2-9940-efa9d8fd74b8" containerName="swift-ring-rebalance" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.075809 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="35b26e94-ffdb-4ee2-9940-efa9d8fd74b8" containerName="swift-ring-rebalance" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.076573 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.078759 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.104141 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ptz2q-config-gx4bv"] Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.234046 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48gk7\" (UniqueName: \"kubernetes.io/projected/5648241d-e658-4dce-86b1-f2b79ea11c56-kube-api-access-48gk7\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.234104 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-log-ovn\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.234162 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5648241d-e658-4dce-86b1-f2b79ea11c56-additional-scripts\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.234202 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-run\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.234295 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-run-ovn\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.234328 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5648241d-e658-4dce-86b1-f2b79ea11c56-scripts\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.261858 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v86d4"] Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.263890 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.280115 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v86d4"] Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.336361 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-run-ovn\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.336742 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5648241d-e658-4dce-86b1-f2b79ea11c56-scripts\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.336800 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48gk7\" (UniqueName: \"kubernetes.io/projected/5648241d-e658-4dce-86b1-f2b79ea11c56-kube-api-access-48gk7\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.336838 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-log-ovn\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.336878 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5648241d-e658-4dce-86b1-f2b79ea11c56-additional-scripts\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.336917 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-run\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.337246 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-run\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.337695 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-run-ovn\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.338452 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-log-ovn\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.339520 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5648241d-e658-4dce-86b1-f2b79ea11c56-additional-scripts\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.343987 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5648241d-e658-4dce-86b1-f2b79ea11c56-scripts\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.395894 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48gk7\" (UniqueName: \"kubernetes.io/projected/5648241d-e658-4dce-86b1-f2b79ea11c56-kube-api-access-48gk7\") pod \"ovn-controller-ptz2q-config-gx4bv\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.396665 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.440980 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fbll\" (UniqueName: \"kubernetes.io/projected/7aa59b69-f9b2-4125-b65c-4bc91f674de0-kube-api-access-2fbll\") pod \"redhat-operators-v86d4\" (UID: \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\") " pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.441030 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7aa59b69-f9b2-4125-b65c-4bc91f674de0-catalog-content\") pod \"redhat-operators-v86d4\" (UID: \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\") " pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.441142 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7aa59b69-f9b2-4125-b65c-4bc91f674de0-utilities\") pod \"redhat-operators-v86d4\" (UID: \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\") " pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.542252 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7aa59b69-f9b2-4125-b65c-4bc91f674de0-utilities\") pod \"redhat-operators-v86d4\" (UID: \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\") " pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.542483 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fbll\" (UniqueName: \"kubernetes.io/projected/7aa59b69-f9b2-4125-b65c-4bc91f674de0-kube-api-access-2fbll\") pod \"redhat-operators-v86d4\" (UID: \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\") " 
pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.542546 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7aa59b69-f9b2-4125-b65c-4bc91f674de0-catalog-content\") pod \"redhat-operators-v86d4\" (UID: \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\") " pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.543072 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7aa59b69-f9b2-4125-b65c-4bc91f674de0-utilities\") pod \"redhat-operators-v86d4\" (UID: \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\") " pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.543153 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7aa59b69-f9b2-4125-b65c-4bc91f674de0-catalog-content\") pod \"redhat-operators-v86d4\" (UID: \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\") " pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.560205 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fbll\" (UniqueName: \"kubernetes.io/projected/7aa59b69-f9b2-4125-b65c-4bc91f674de0-kube-api-access-2fbll\") pod \"redhat-operators-v86d4\" (UID: \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\") " pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.566886 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7j79r" event={"ID":"35b26e94-ffdb-4ee2-9940-efa9d8fd74b8","Type":"ContainerDied","Data":"15bbe51c5842a1d0c6293aa1d46d89ab701c4595c9e01e6804638567b6049c31"} Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.566928 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15bbe51c5842a1d0c6293aa1d46d89ab701c4595c9e01e6804638567b6049c31" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.566993 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-7j79r" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.584048 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:26 crc kubenswrapper[5037]: I1126 14:39:26.889596 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ptz2q-config-gx4bv"] Nov 26 14:39:27 crc kubenswrapper[5037]: I1126 14:39:27.108420 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v86d4"] Nov 26 14:39:27 crc kubenswrapper[5037]: W1126 14:39:27.117314 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7aa59b69_f9b2_4125_b65c_4bc91f674de0.slice/crio-28904659bd6c9fe4597e5d454858a7f590cdf239c18d1dd20de6183faaf8987e WatchSource:0}: Error finding container 28904659bd6c9fe4597e5d454858a7f590cdf239c18d1dd20de6183faaf8987e: Status 404 returned error can't find the container with id 28904659bd6c9fe4597e5d454858a7f590cdf239c18d1dd20de6183faaf8987e Nov 26 14:39:27 crc kubenswrapper[5037]: I1126 14:39:27.581622 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ptz2q-config-gx4bv" event={"ID":"5648241d-e658-4dce-86b1-f2b79ea11c56","Type":"ContainerStarted","Data":"2800f7d3106987fa8cb86a64eef775428f5a97b3fb2e92fc9188245db77ac484"} Nov 26 14:39:27 crc kubenswrapper[5037]: I1126 14:39:27.581664 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ptz2q-config-gx4bv" event={"ID":"5648241d-e658-4dce-86b1-f2b79ea11c56","Type":"ContainerStarted","Data":"91603d3a79d263bc9e1c9df72c3ebbbf0863a258f74a3d374a38bdb3cdb52f5c"} Nov 26 14:39:27 crc kubenswrapper[5037]: I1126 14:39:27.588092 5037 generic.go:334] "Generic (PLEG): container finished" podID="7aa59b69-f9b2-4125-b65c-4bc91f674de0" containerID="1c6039fe945e7ca97d62d82a6078eefeafdf200b4af0da2ef7e366e27c34a966" exitCode=0 Nov 26 14:39:27 crc kubenswrapper[5037]: I1126 14:39:27.588137 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v86d4" event={"ID":"7aa59b69-f9b2-4125-b65c-4bc91f674de0","Type":"ContainerDied","Data":"1c6039fe945e7ca97d62d82a6078eefeafdf200b4af0da2ef7e366e27c34a966"} Nov 26 14:39:27 crc kubenswrapper[5037]: I1126 14:39:27.588162 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v86d4" event={"ID":"7aa59b69-f9b2-4125-b65c-4bc91f674de0","Type":"ContainerStarted","Data":"28904659bd6c9fe4597e5d454858a7f590cdf239c18d1dd20de6183faaf8987e"} Nov 26 14:39:27 crc kubenswrapper[5037]: I1126 14:39:27.599857 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ptz2q-config-gx4bv" podStartSLOduration=1.599841015 podStartE2EDuration="1.599841015s" podCreationTimestamp="2025-11-26 14:39:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:39:27.597812035 +0000 UTC m=+1434.394582219" watchObservedRunningTime="2025-11-26 14:39:27.599841015 +0000 UTC m=+1434.396611199" Nov 26 14:39:28 crc kubenswrapper[5037]: I1126 14:39:28.596835 5037 generic.go:334] "Generic (PLEG): container finished" podID="ba78b94a-32d0-4377-ac41-ffd036b241bf" containerID="773034796f31390fe28fdc58e1e871d5f541491426df6c51095f768444fbd35d" exitCode=0 Nov 26 14:39:28 crc kubenswrapper[5037]: I1126 14:39:28.596923 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"ba78b94a-32d0-4377-ac41-ffd036b241bf","Type":"ContainerDied","Data":"773034796f31390fe28fdc58e1e871d5f541491426df6c51095f768444fbd35d"} Nov 26 14:39:28 crc kubenswrapper[5037]: I1126 14:39:28.601187 5037 generic.go:334] "Generic (PLEG): container finished" podID="5648241d-e658-4dce-86b1-f2b79ea11c56" containerID="2800f7d3106987fa8cb86a64eef775428f5a97b3fb2e92fc9188245db77ac484" exitCode=0 Nov 26 14:39:28 crc kubenswrapper[5037]: I1126 14:39:28.601252 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ptz2q-config-gx4bv" event={"ID":"5648241d-e658-4dce-86b1-f2b79ea11c56","Type":"ContainerDied","Data":"2800f7d3106987fa8cb86a64eef775428f5a97b3fb2e92fc9188245db77ac484"} Nov 26 14:39:28 crc kubenswrapper[5037]: I1126 14:39:28.603563 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v86d4" event={"ID":"7aa59b69-f9b2-4125-b65c-4bc91f674de0","Type":"ContainerStarted","Data":"c4a0415a028733556d77f3ac62d5c56b65a6cfa300d69d557a9874edc57afe50"} Nov 26 14:39:29 crc kubenswrapper[5037]: I1126 14:39:29.619327 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ba78b94a-32d0-4377-ac41-ffd036b241bf","Type":"ContainerStarted","Data":"74f68fdb96d374b8d9906137608e1412d3d306ce1e1daedf2234bd65a15de9cc"} Nov 26 14:39:29 crc kubenswrapper[5037]: I1126 14:39:29.619920 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 26 14:39:29 crc kubenswrapper[5037]: I1126 14:39:29.621914 5037 generic.go:334] "Generic (PLEG): container finished" podID="7aa59b69-f9b2-4125-b65c-4bc91f674de0" containerID="c4a0415a028733556d77f3ac62d5c56b65a6cfa300d69d557a9874edc57afe50" exitCode=0 Nov 26 14:39:29 crc kubenswrapper[5037]: I1126 14:39:29.622006 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v86d4" event={"ID":"7aa59b69-f9b2-4125-b65c-4bc91f674de0","Type":"ContainerDied","Data":"c4a0415a028733556d77f3ac62d5c56b65a6cfa300d69d557a9874edc57afe50"} Nov 26 14:39:29 crc kubenswrapper[5037]: I1126 14:39:29.667945 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.98405466 podStartE2EDuration="1m20.667927633s" podCreationTimestamp="2025-11-26 14:38:09 +0000 UTC" firstStartedPulling="2025-11-26 14:38:11.231818812 +0000 UTC m=+1358.028588996" lastFinishedPulling="2025-11-26 14:38:54.915691785 +0000 UTC m=+1401.712461969" observedRunningTime="2025-11-26 14:39:29.64815951 +0000 UTC m=+1436.444929704" watchObservedRunningTime="2025-11-26 14:39:29.667927633 +0000 UTC m=+1436.464697807" Nov 26 14:39:29 crc kubenswrapper[5037]: I1126 14:39:29.925130 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.103947 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5648241d-e658-4dce-86b1-f2b79ea11c56-scripts\") pod \"5648241d-e658-4dce-86b1-f2b79ea11c56\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.104015 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-log-ovn\") pod \"5648241d-e658-4dce-86b1-f2b79ea11c56\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.104098 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-48gk7\" (UniqueName: \"kubernetes.io/projected/5648241d-e658-4dce-86b1-f2b79ea11c56-kube-api-access-48gk7\") pod \"5648241d-e658-4dce-86b1-f2b79ea11c56\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.104175 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "5648241d-e658-4dce-86b1-f2b79ea11c56" (UID: "5648241d-e658-4dce-86b1-f2b79ea11c56"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.104240 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-run-ovn\") pod \"5648241d-e658-4dce-86b1-f2b79ea11c56\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.104270 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-run\") pod \"5648241d-e658-4dce-86b1-f2b79ea11c56\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.104315 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "5648241d-e658-4dce-86b1-f2b79ea11c56" (UID: "5648241d-e658-4dce-86b1-f2b79ea11c56"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.104422 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-run" (OuterVolumeSpecName: "var-run") pod "5648241d-e658-4dce-86b1-f2b79ea11c56" (UID: "5648241d-e658-4dce-86b1-f2b79ea11c56"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.104495 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5648241d-e658-4dce-86b1-f2b79ea11c56-additional-scripts\") pod \"5648241d-e658-4dce-86b1-f2b79ea11c56\" (UID: \"5648241d-e658-4dce-86b1-f2b79ea11c56\") " Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.105277 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5648241d-e658-4dce-86b1-f2b79ea11c56-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "5648241d-e658-4dce-86b1-f2b79ea11c56" (UID: "5648241d-e658-4dce-86b1-f2b79ea11c56"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.105551 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5648241d-e658-4dce-86b1-f2b79ea11c56-scripts" (OuterVolumeSpecName: "scripts") pod "5648241d-e658-4dce-86b1-f2b79ea11c56" (UID: "5648241d-e658-4dce-86b1-f2b79ea11c56"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.106113 5037 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.106137 5037 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.106149 5037 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5648241d-e658-4dce-86b1-f2b79ea11c56-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.106162 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5648241d-e658-4dce-86b1-f2b79ea11c56-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.106170 5037 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5648241d-e658-4dce-86b1-f2b79ea11c56-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.124618 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5648241d-e658-4dce-86b1-f2b79ea11c56-kube-api-access-48gk7" (OuterVolumeSpecName: "kube-api-access-48gk7") pod "5648241d-e658-4dce-86b1-f2b79ea11c56" (UID: "5648241d-e658-4dce-86b1-f2b79ea11c56"). InnerVolumeSpecName "kube-api-access-48gk7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.208245 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-48gk7\" (UniqueName: \"kubernetes.io/projected/5648241d-e658-4dce-86b1-f2b79ea11c56-kube-api-access-48gk7\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.652921 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ptz2q-config-gx4bv" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.655723 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ptz2q-config-gx4bv" event={"ID":"5648241d-e658-4dce-86b1-f2b79ea11c56","Type":"ContainerDied","Data":"91603d3a79d263bc9e1c9df72c3ebbbf0863a258f74a3d374a38bdb3cdb52f5c"} Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.655877 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91603d3a79d263bc9e1c9df72c3ebbbf0863a258f74a3d374a38bdb3cdb52f5c" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.679723 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v86d4" podStartSLOduration=1.809644695 podStartE2EDuration="4.679700008s" podCreationTimestamp="2025-11-26 14:39:26 +0000 UTC" firstStartedPulling="2025-11-26 14:39:27.590565968 +0000 UTC m=+1434.387336152" lastFinishedPulling="2025-11-26 14:39:30.460621281 +0000 UTC m=+1437.257391465" observedRunningTime="2025-11-26 14:39:30.667548622 +0000 UTC m=+1437.464318806" watchObservedRunningTime="2025-11-26 14:39:30.679700008 +0000 UTC m=+1437.476470192" Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.716297 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ptz2q-config-gx4bv"] Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.732157 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ptz2q-config-gx4bv"] Nov 26 14:39:30 crc kubenswrapper[5037]: I1126 14:39:30.833751 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ptz2q" Nov 26 14:39:31 crc kubenswrapper[5037]: I1126 14:39:31.665845 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v86d4" event={"ID":"7aa59b69-f9b2-4125-b65c-4bc91f674de0","Type":"ContainerStarted","Data":"448643d0c78e6566671aefa4f33188e18cdf04ab9991efec45bbadfcfa7af881"} Nov 26 14:39:31 crc kubenswrapper[5037]: I1126 14:39:31.919137 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5648241d-e658-4dce-86b1-f2b79ea11c56" path="/var/lib/kubelet/pods/5648241d-e658-4dce-86b1-f2b79ea11c56/volumes" Nov 26 14:39:34 crc kubenswrapper[5037]: I1126 14:39:34.694515 5037 generic.go:334] "Generic (PLEG): container finished" podID="7f05291f-1331-411b-9971-c71218d11a35" containerID="1349d55b286187786ca0c0752c21570fa4516f326b8465f8b5bb44574d1252f9" exitCode=0 Nov 26 14:39:34 crc kubenswrapper[5037]: I1126 14:39:34.694588 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"7f05291f-1331-411b-9971-c71218d11a35","Type":"ContainerDied","Data":"1349d55b286187786ca0c0752c21570fa4516f326b8465f8b5bb44574d1252f9"} Nov 26 14:39:36 crc kubenswrapper[5037]: I1126 14:39:36.585145 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:36 crc kubenswrapper[5037]: I1126 14:39:36.585502 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:36 crc kubenswrapper[5037]: I1126 14:39:36.629986 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:36 crc kubenswrapper[5037]: I1126 14:39:36.769454 5037 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:38 crc kubenswrapper[5037]: I1126 14:39:38.944494 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:38 crc kubenswrapper[5037]: I1126 14:39:38.952313 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift\") pod \"swift-storage-0\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " pod="openstack/swift-storage-0" Nov 26 14:39:39 crc kubenswrapper[5037]: I1126 14:39:39.149965 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 26 14:39:40 crc kubenswrapper[5037]: I1126 14:39:40.045032 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v86d4"] Nov 26 14:39:40 crc kubenswrapper[5037]: I1126 14:39:40.045634 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-v86d4" podUID="7aa59b69-f9b2-4125-b65c-4bc91f674de0" containerName="registry-server" containerID="cri-o://448643d0c78e6566671aefa4f33188e18cdf04ab9991efec45bbadfcfa7af881" gracePeriod=2 Nov 26 14:39:40 crc kubenswrapper[5037]: E1126 14:39:40.567020 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api@sha256:26bd7b0bd6070856aefef6fe754c547d55c056396ea30d879d34c2d49b5a1d29" Nov 26 14:39:40 crc kubenswrapper[5037]: E1126 14:39:40.567218 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api@sha256:26bd7b0bd6070856aefef6fe754c547d55c056396ea30d879d34c2d49b5a1d29,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vnp75,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-mgn9v_openstack(2369115c-ae08-42b0-af64-c42191c04502): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 14:39:40 crc kubenswrapper[5037]: E1126 14:39:40.568449 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-mgn9v" podUID="2369115c-ae08-42b0-af64-c42191c04502" Nov 26 14:39:40 crc kubenswrapper[5037]: I1126 14:39:40.746601 5037 generic.go:334] "Generic (PLEG): container finished" podID="7aa59b69-f9b2-4125-b65c-4bc91f674de0" containerID="448643d0c78e6566671aefa4f33188e18cdf04ab9991efec45bbadfcfa7af881" exitCode=0 Nov 26 14:39:40 crc kubenswrapper[5037]: I1126 14:39:40.746821 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v86d4" event={"ID":"7aa59b69-f9b2-4125-b65c-4bc91f674de0","Type":"ContainerDied","Data":"448643d0c78e6566671aefa4f33188e18cdf04ab9991efec45bbadfcfa7af881"} Nov 26 14:39:40 crc kubenswrapper[5037]: E1126 14:39:40.749706 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api@sha256:26bd7b0bd6070856aefef6fe754c547d55c056396ea30d879d34c2d49b5a1d29\\\"\"" pod="openstack/glance-db-sync-mgn9v" podUID="2369115c-ae08-42b0-af64-c42191c04502" Nov 26 14:39:40 crc kubenswrapper[5037]: I1126 14:39:40.790914 5037 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/rabbitmq-server-0" podUID="ba78b94a-32d0-4377-ac41-ffd036b241bf" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.100:5671: connect: connection refused" Nov 26 14:39:40 crc kubenswrapper[5037]: I1126 14:39:40.887403 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:40 crc kubenswrapper[5037]: I1126 14:39:40.982645 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7aa59b69-f9b2-4125-b65c-4bc91f674de0-catalog-content\") pod \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\" (UID: \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\") " Nov 26 14:39:40 crc kubenswrapper[5037]: I1126 14:39:40.982884 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7aa59b69-f9b2-4125-b65c-4bc91f674de0-utilities\") pod \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\" (UID: \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\") " Nov 26 14:39:40 crc kubenswrapper[5037]: I1126 14:39:40.982947 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fbll\" (UniqueName: \"kubernetes.io/projected/7aa59b69-f9b2-4125-b65c-4bc91f674de0-kube-api-access-2fbll\") pod \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\" (UID: \"7aa59b69-f9b2-4125-b65c-4bc91f674de0\") " Nov 26 14:39:40 crc kubenswrapper[5037]: I1126 14:39:40.984034 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7aa59b69-f9b2-4125-b65c-4bc91f674de0-utilities" (OuterVolumeSpecName: "utilities") pod "7aa59b69-f9b2-4125-b65c-4bc91f674de0" (UID: "7aa59b69-f9b2-4125-b65c-4bc91f674de0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:39:40 crc kubenswrapper[5037]: I1126 14:39:40.989511 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7aa59b69-f9b2-4125-b65c-4bc91f674de0-kube-api-access-2fbll" (OuterVolumeSpecName: "kube-api-access-2fbll") pod "7aa59b69-f9b2-4125-b65c-4bc91f674de0" (UID: "7aa59b69-f9b2-4125-b65c-4bc91f674de0"). InnerVolumeSpecName "kube-api-access-2fbll". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.068539 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7aa59b69-f9b2-4125-b65c-4bc91f674de0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7aa59b69-f9b2-4125-b65c-4bc91f674de0" (UID: "7aa59b69-f9b2-4125-b65c-4bc91f674de0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.085031 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7aa59b69-f9b2-4125-b65c-4bc91f674de0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.085430 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7aa59b69-f9b2-4125-b65c-4bc91f674de0-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.085445 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fbll\" (UniqueName: \"kubernetes.io/projected/7aa59b69-f9b2-4125-b65c-4bc91f674de0-kube-api-access-2fbll\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:41 crc kubenswrapper[5037]: W1126 14:39:41.149316 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10886f85_c800_4999_8c79_c490c60696cc.slice/crio-b80c78e74048f915f9c31fe4cb9091ed1e1e9a14388858dd2ca0a92d07227a6b WatchSource:0}: Error finding container b80c78e74048f915f9c31fe4cb9091ed1e1e9a14388858dd2ca0a92d07227a6b: Status 404 returned error can't find the container with id b80c78e74048f915f9c31fe4cb9091ed1e1e9a14388858dd2ca0a92d07227a6b Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.150627 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.247694 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.247747 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.758622 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"b80c78e74048f915f9c31fe4cb9091ed1e1e9a14388858dd2ca0a92d07227a6b"} Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.761866 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"7f05291f-1331-411b-9971-c71218d11a35","Type":"ContainerStarted","Data":"4483535c43e875eaf8b876f0ce67748ccef6e9a8c9dba169d9e8c3b8043014ae"} Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.763014 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.764267 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v86d4" event={"ID":"7aa59b69-f9b2-4125-b65c-4bc91f674de0","Type":"ContainerDied","Data":"28904659bd6c9fe4597e5d454858a7f590cdf239c18d1dd20de6183faaf8987e"} Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.764317 5037 scope.go:117] "RemoveContainer" 
containerID="448643d0c78e6566671aefa4f33188e18cdf04ab9991efec45bbadfcfa7af881" Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.764443 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v86d4" Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.786801 5037 scope.go:117] "RemoveContainer" containerID="c4a0415a028733556d77f3ac62d5c56b65a6cfa300d69d557a9874edc57afe50" Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.800756 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371944.054043 podStartE2EDuration="1m32.800733821s" podCreationTimestamp="2025-11-26 14:38:09 +0000 UTC" firstStartedPulling="2025-11-26 14:38:12.1532173 +0000 UTC m=+1358.949987484" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:39:41.793673519 +0000 UTC m=+1448.590443803" watchObservedRunningTime="2025-11-26 14:39:41.800733821 +0000 UTC m=+1448.597504015" Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.819076 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v86d4"] Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.831985 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-v86d4"] Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.845638 5037 scope.go:117] "RemoveContainer" containerID="1c6039fe945e7ca97d62d82a6078eefeafdf200b4af0da2ef7e366e27c34a966" Nov 26 14:39:41 crc kubenswrapper[5037]: I1126 14:39:41.929237 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7aa59b69-f9b2-4125-b65c-4bc91f674de0" path="/var/lib/kubelet/pods/7aa59b69-f9b2-4125-b65c-4bc91f674de0/volumes" Nov 26 14:39:42 crc kubenswrapper[5037]: I1126 14:39:42.776616 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"6af1db545967ed1a4d63df5e069cefc5f2002414e3177a1c53b51f7542200023"} Nov 26 14:39:43 crc kubenswrapper[5037]: I1126 14:39:43.788057 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"1830d485d70f2c4c16c972d8eb54d3d68060d42e9eb67b0f0be4a183511992c6"} Nov 26 14:39:43 crc kubenswrapper[5037]: I1126 14:39:43.788452 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"9ad134020857e5330738626c90a057bc32ca98d01d16f8a94a600086e2df114c"} Nov 26 14:39:43 crc kubenswrapper[5037]: I1126 14:39:43.788465 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"b54bd1523c4248a6b946bc2484b15b9a925819b903de19e564491a32a104536e"} Nov 26 14:39:45 crc kubenswrapper[5037]: I1126 14:39:45.812816 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"3269f937868b4639b15beb2313a77a6d697a8359d42a1eac21aab99aba4a3441"} Nov 26 14:39:45 crc kubenswrapper[5037]: I1126 14:39:45.813183 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"a67a55597dfa0413c7fcfba871c60e4ca78dcc1337e642fa8a730a82b2946f38"} Nov 26 14:39:45 crc kubenswrapper[5037]: I1126 14:39:45.813198 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"64e910dd424738dcc2a6a10dbfc0d43ed55b865d44976cf3ce77949fd94d142f"} Nov 26 14:39:46 crc kubenswrapper[5037]: I1126 14:39:46.823926 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7"} Nov 26 14:39:47 crc kubenswrapper[5037]: I1126 14:39:47.855067 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948"} Nov 26 14:39:47 crc kubenswrapper[5037]: I1126 14:39:47.855442 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140"} Nov 26 14:39:48 crc kubenswrapper[5037]: I1126 14:39:48.880276 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5"} Nov 26 14:39:48 crc kubenswrapper[5037]: I1126 14:39:48.880784 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"c8f7e68bd6dcee155bb73bde0f7e251636a9f691fa99efe37da6d71e22470060"} Nov 26 14:39:48 crc kubenswrapper[5037]: I1126 14:39:48.880824 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"73c060abeb7573649685e311227f2a579fdf95557d8415f02f112eb7df9fe387"} Nov 26 14:39:48 crc kubenswrapper[5037]: I1126 14:39:48.880838 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349"} Nov 26 14:39:48 crc kubenswrapper[5037]: I1126 14:39:48.880852 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerStarted","Data":"38af5291214696fc2ab5068031bf723126b6ea1a4502cbaf41fec1945bdddb71"} Nov 26 14:39:48 crc kubenswrapper[5037]: I1126 14:39:48.934965 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=37.842916738 podStartE2EDuration="43.934938703s" podCreationTimestamp="2025-11-26 14:39:05 +0000 UTC" firstStartedPulling="2025-11-26 14:39:41.151371781 +0000 UTC m=+1447.948141965" lastFinishedPulling="2025-11-26 14:39:47.243393746 +0000 UTC m=+1454.040163930" observedRunningTime="2025-11-26 14:39:48.932278338 +0000 UTC m=+1455.729048612" watchObservedRunningTime="2025-11-26 14:39:48.934938703 +0000 UTC m=+1455.731708897" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.219276 5037 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86c887b9fc-fhqq9"] Nov 26 14:39:49 crc kubenswrapper[5037]: E1126 14:39:49.219597 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7aa59b69-f9b2-4125-b65c-4bc91f674de0" containerName="registry-server" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.219610 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="7aa59b69-f9b2-4125-b65c-4bc91f674de0" containerName="registry-server" Nov 26 14:39:49 crc kubenswrapper[5037]: E1126 14:39:49.219623 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7aa59b69-f9b2-4125-b65c-4bc91f674de0" containerName="extract-utilities" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.219629 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="7aa59b69-f9b2-4125-b65c-4bc91f674de0" containerName="extract-utilities" Nov 26 14:39:49 crc kubenswrapper[5037]: E1126 14:39:49.219648 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5648241d-e658-4dce-86b1-f2b79ea11c56" containerName="ovn-config" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.219655 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="5648241d-e658-4dce-86b1-f2b79ea11c56" containerName="ovn-config" Nov 26 14:39:49 crc kubenswrapper[5037]: E1126 14:39:49.219666 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7aa59b69-f9b2-4125-b65c-4bc91f674de0" containerName="extract-content" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.219672 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="7aa59b69-f9b2-4125-b65c-4bc91f674de0" containerName="extract-content" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.219808 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="7aa59b69-f9b2-4125-b65c-4bc91f674de0" containerName="registry-server" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.219820 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="5648241d-e658-4dce-86b1-f2b79ea11c56" containerName="ovn-config" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.220803 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.222781 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.240557 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86c887b9fc-fhqq9"] Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.324477 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jh9z\" (UniqueName: \"kubernetes.io/projected/c84209aa-144f-4082-88b0-c83eb7e57f24-kube-api-access-9jh9z\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.324561 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-dns-svc\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.324704 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-dns-swift-storage-0\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.324743 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-config\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.324866 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-ovsdbserver-nb\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.324912 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-ovsdbserver-sb\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.426351 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-dns-svc\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.426551 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-config\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " 
pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.426768 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-dns-swift-storage-0\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.426910 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-ovsdbserver-nb\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.426950 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-ovsdbserver-sb\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.426997 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jh9z\" (UniqueName: \"kubernetes.io/projected/c84209aa-144f-4082-88b0-c83eb7e57f24-kube-api-access-9jh9z\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.427307 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-dns-svc\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.427938 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-dns-swift-storage-0\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.428634 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-ovsdbserver-nb\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.428665 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-config\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.429481 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-ovsdbserver-sb\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: 
I1126 14:39:49.447249 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jh9z\" (UniqueName: \"kubernetes.io/projected/c84209aa-144f-4082-88b0-c83eb7e57f24-kube-api-access-9jh9z\") pod \"dnsmasq-dns-86c887b9fc-fhqq9\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:49 crc kubenswrapper[5037]: I1126 14:39:49.541964 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:50 crc kubenswrapper[5037]: I1126 14:39:50.026113 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86c887b9fc-fhqq9"] Nov 26 14:39:50 crc kubenswrapper[5037]: W1126 14:39:50.034055 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc84209aa_144f_4082_88b0_c83eb7e57f24.slice/crio-21c1bf35ae18cae5d10346a44424c5dcc70b25fd0973a12eb82aa42bf2862ac8 WatchSource:0}: Error finding container 21c1bf35ae18cae5d10346a44424c5dcc70b25fd0973a12eb82aa42bf2862ac8: Status 404 returned error can't find the container with id 21c1bf35ae18cae5d10346a44424c5dcc70b25fd0973a12eb82aa42bf2862ac8 Nov 26 14:39:50 crc kubenswrapper[5037]: I1126 14:39:50.789527 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 26 14:39:50 crc kubenswrapper[5037]: I1126 14:39:50.914399 5037 generic.go:334] "Generic (PLEG): container finished" podID="c84209aa-144f-4082-88b0-c83eb7e57f24" containerID="7694b2e339bea993dbb7ba2c5050c3c43d6c58ee1c74d9584b2a39736f8cd2a1" exitCode=0 Nov 26 14:39:50 crc kubenswrapper[5037]: I1126 14:39:50.914718 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" event={"ID":"c84209aa-144f-4082-88b0-c83eb7e57f24","Type":"ContainerDied","Data":"7694b2e339bea993dbb7ba2c5050c3c43d6c58ee1c74d9584b2a39736f8cd2a1"} Nov 26 14:39:50 crc kubenswrapper[5037]: I1126 14:39:50.914827 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" event={"ID":"c84209aa-144f-4082-88b0-c83eb7e57f24","Type":"ContainerStarted","Data":"21c1bf35ae18cae5d10346a44424c5dcc70b25fd0973a12eb82aa42bf2862ac8"} Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.137602 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-f5wmh"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.139141 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-f5wmh" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.168588 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-f5wmh"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.221119 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-4j78w"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.225183 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-4j78w" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.235401 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-4j78w"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.245694 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-c68b-account-create-update-8xxjl"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.247205 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c68b-account-create-update-8xxjl" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.264421 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.264439 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcwcs\" (UniqueName: \"kubernetes.io/projected/791704a5-2365-4a7a-9cb5-5512a543aab2-kube-api-access-pcwcs\") pod \"cinder-db-create-f5wmh\" (UID: \"791704a5-2365-4a7a-9cb5-5512a543aab2\") " pod="openstack/cinder-db-create-f5wmh" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.267182 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/791704a5-2365-4a7a-9cb5-5512a543aab2-operator-scripts\") pod \"cinder-db-create-f5wmh\" (UID: \"791704a5-2365-4a7a-9cb5-5512a543aab2\") " pod="openstack/cinder-db-create-f5wmh" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.290384 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-c68b-account-create-update-8xxjl"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.341951 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-f9d3-account-create-update-9vvp4"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.342979 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-f9d3-account-create-update-9vvp4" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.355692 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.359115 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-f9d3-account-create-update-9vvp4"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.369658 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/791704a5-2365-4a7a-9cb5-5512a543aab2-operator-scripts\") pod \"cinder-db-create-f5wmh\" (UID: \"791704a5-2365-4a7a-9cb5-5512a543aab2\") " pod="openstack/cinder-db-create-f5wmh" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.369714 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pttdj\" (UniqueName: \"kubernetes.io/projected/29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e-kube-api-access-pttdj\") pod \"cinder-c68b-account-create-update-8xxjl\" (UID: \"29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e\") " pod="openstack/cinder-c68b-account-create-update-8xxjl" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.369772 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmrr4\" (UniqueName: \"kubernetes.io/projected/70ae4ec3-44f5-4978-ba4a-31d762f0d748-kube-api-access-fmrr4\") pod \"barbican-db-create-4j78w\" (UID: \"70ae4ec3-44f5-4978-ba4a-31d762f0d748\") " pod="openstack/barbican-db-create-4j78w" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.369800 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9338ce30-fd3e-4fa1-bbc3-5f73cbba9662-operator-scripts\") pod \"barbican-f9d3-account-create-update-9vvp4\" (UID: \"9338ce30-fd3e-4fa1-bbc3-5f73cbba9662\") " pod="openstack/barbican-f9d3-account-create-update-9vvp4" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.369856 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hz54\" (UniqueName: \"kubernetes.io/projected/9338ce30-fd3e-4fa1-bbc3-5f73cbba9662-kube-api-access-6hz54\") pod \"barbican-f9d3-account-create-update-9vvp4\" (UID: \"9338ce30-fd3e-4fa1-bbc3-5f73cbba9662\") " pod="openstack/barbican-f9d3-account-create-update-9vvp4" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.369875 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcwcs\" (UniqueName: \"kubernetes.io/projected/791704a5-2365-4a7a-9cb5-5512a543aab2-kube-api-access-pcwcs\") pod \"cinder-db-create-f5wmh\" (UID: \"791704a5-2365-4a7a-9cb5-5512a543aab2\") " pod="openstack/cinder-db-create-f5wmh" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.369892 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70ae4ec3-44f5-4978-ba4a-31d762f0d748-operator-scripts\") pod \"barbican-db-create-4j78w\" (UID: \"70ae4ec3-44f5-4978-ba4a-31d762f0d748\") " pod="openstack/barbican-db-create-4j78w" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.369911 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e-operator-scripts\") pod \"cinder-c68b-account-create-update-8xxjl\" (UID: \"29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e\") " pod="openstack/cinder-c68b-account-create-update-8xxjl" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.370903 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/791704a5-2365-4a7a-9cb5-5512a543aab2-operator-scripts\") pod \"cinder-db-create-f5wmh\" (UID: \"791704a5-2365-4a7a-9cb5-5512a543aab2\") " pod="openstack/cinder-db-create-f5wmh" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.386684 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcwcs\" (UniqueName: \"kubernetes.io/projected/791704a5-2365-4a7a-9cb5-5512a543aab2-kube-api-access-pcwcs\") pod \"cinder-db-create-f5wmh\" (UID: \"791704a5-2365-4a7a-9cb5-5512a543aab2\") " pod="openstack/cinder-db-create-f5wmh" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.456638 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-f5wmh" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.467883 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-dkcpw"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.469113 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-dkcpw" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.471642 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9338ce30-fd3e-4fa1-bbc3-5f73cbba9662-operator-scripts\") pod \"barbican-f9d3-account-create-update-9vvp4\" (UID: \"9338ce30-fd3e-4fa1-bbc3-5f73cbba9662\") " pod="openstack/barbican-f9d3-account-create-update-9vvp4" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.471700 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ae25f93-9fa1-42e0-8a13-984460bdd087-config-data\") pod \"keystone-db-sync-dkcpw\" (UID: \"3ae25f93-9fa1-42e0-8a13-984460bdd087\") " pod="openstack/keystone-db-sync-dkcpw" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.471768 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ae25f93-9fa1-42e0-8a13-984460bdd087-combined-ca-bundle\") pod \"keystone-db-sync-dkcpw\" (UID: \"3ae25f93-9fa1-42e0-8a13-984460bdd087\") " pod="openstack/keystone-db-sync-dkcpw" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.471818 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hz54\" (UniqueName: \"kubernetes.io/projected/9338ce30-fd3e-4fa1-bbc3-5f73cbba9662-kube-api-access-6hz54\") pod \"barbican-f9d3-account-create-update-9vvp4\" (UID: \"9338ce30-fd3e-4fa1-bbc3-5f73cbba9662\") " pod="openstack/barbican-f9d3-account-create-update-9vvp4" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.471848 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70ae4ec3-44f5-4978-ba4a-31d762f0d748-operator-scripts\") pod \"barbican-db-create-4j78w\" (UID: \"70ae4ec3-44f5-4978-ba4a-31d762f0d748\") " pod="openstack/barbican-db-create-4j78w" Nov 26 14:39:51 crc 
kubenswrapper[5037]: I1126 14:39:51.471875 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e-operator-scripts\") pod \"cinder-c68b-account-create-update-8xxjl\" (UID: \"29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e\") " pod="openstack/cinder-c68b-account-create-update-8xxjl" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.471919 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmm7t\" (UniqueName: \"kubernetes.io/projected/3ae25f93-9fa1-42e0-8a13-984460bdd087-kube-api-access-bmm7t\") pod \"keystone-db-sync-dkcpw\" (UID: \"3ae25f93-9fa1-42e0-8a13-984460bdd087\") " pod="openstack/keystone-db-sync-dkcpw" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.471974 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pttdj\" (UniqueName: \"kubernetes.io/projected/29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e-kube-api-access-pttdj\") pod \"cinder-c68b-account-create-update-8xxjl\" (UID: \"29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e\") " pod="openstack/cinder-c68b-account-create-update-8xxjl" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.472045 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmrr4\" (UniqueName: \"kubernetes.io/projected/70ae4ec3-44f5-4978-ba4a-31d762f0d748-kube-api-access-fmrr4\") pod \"barbican-db-create-4j78w\" (UID: \"70ae4ec3-44f5-4978-ba4a-31d762f0d748\") " pod="openstack/barbican-db-create-4j78w" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.472505 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9338ce30-fd3e-4fa1-bbc3-5f73cbba9662-operator-scripts\") pod \"barbican-f9d3-account-create-update-9vvp4\" (UID: \"9338ce30-fd3e-4fa1-bbc3-5f73cbba9662\") " pod="openstack/barbican-f9d3-account-create-update-9vvp4" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.473008 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70ae4ec3-44f5-4978-ba4a-31d762f0d748-operator-scripts\") pod \"barbican-db-create-4j78w\" (UID: \"70ae4ec3-44f5-4978-ba4a-31d762f0d748\") " pod="openstack/barbican-db-create-4j78w" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.473136 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e-operator-scripts\") pod \"cinder-c68b-account-create-update-8xxjl\" (UID: \"29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e\") " pod="openstack/cinder-c68b-account-create-update-8xxjl" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.480849 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-fpv7w" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.480935 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.481074 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.481171 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.496012 5037 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pttdj\" (UniqueName: \"kubernetes.io/projected/29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e-kube-api-access-pttdj\") pod \"cinder-c68b-account-create-update-8xxjl\" (UID: \"29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e\") " pod="openstack/cinder-c68b-account-create-update-8xxjl" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.510555 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.535023 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmrr4\" (UniqueName: \"kubernetes.io/projected/70ae4ec3-44f5-4978-ba4a-31d762f0d748-kube-api-access-fmrr4\") pod \"barbican-db-create-4j78w\" (UID: \"70ae4ec3-44f5-4978-ba4a-31d762f0d748\") " pod="openstack/barbican-db-create-4j78w" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.544825 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-dkcpw"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.550570 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-4j78w" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.550914 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hz54\" (UniqueName: \"kubernetes.io/projected/9338ce30-fd3e-4fa1-bbc3-5f73cbba9662-kube-api-access-6hz54\") pod \"barbican-f9d3-account-create-update-9vvp4\" (UID: \"9338ce30-fd3e-4fa1-bbc3-5f73cbba9662\") " pod="openstack/barbican-f9d3-account-create-update-9vvp4" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.573002 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ae25f93-9fa1-42e0-8a13-984460bdd087-config-data\") pod \"keystone-db-sync-dkcpw\" (UID: \"3ae25f93-9fa1-42e0-8a13-984460bdd087\") " pod="openstack/keystone-db-sync-dkcpw" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.573245 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ae25f93-9fa1-42e0-8a13-984460bdd087-combined-ca-bundle\") pod \"keystone-db-sync-dkcpw\" (UID: \"3ae25f93-9fa1-42e0-8a13-984460bdd087\") " pod="openstack/keystone-db-sync-dkcpw" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.573381 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmm7t\" (UniqueName: \"kubernetes.io/projected/3ae25f93-9fa1-42e0-8a13-984460bdd087-kube-api-access-bmm7t\") pod \"keystone-db-sync-dkcpw\" (UID: \"3ae25f93-9fa1-42e0-8a13-984460bdd087\") " pod="openstack/keystone-db-sync-dkcpw" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.576134 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-c68b-account-create-update-8xxjl" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.581659 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-3cec-account-create-update-rnbdt"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.582309 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ae25f93-9fa1-42e0-8a13-984460bdd087-config-data\") pod \"keystone-db-sync-dkcpw\" (UID: \"3ae25f93-9fa1-42e0-8a13-984460bdd087\") " pod="openstack/keystone-db-sync-dkcpw" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.582933 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3cec-account-create-update-rnbdt" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.586644 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.587251 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ae25f93-9fa1-42e0-8a13-984460bdd087-combined-ca-bundle\") pod \"keystone-db-sync-dkcpw\" (UID: \"3ae25f93-9fa1-42e0-8a13-984460bdd087\") " pod="openstack/keystone-db-sync-dkcpw" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.628861 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-4lgf6"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.629643 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmm7t\" (UniqueName: \"kubernetes.io/projected/3ae25f93-9fa1-42e0-8a13-984460bdd087-kube-api-access-bmm7t\") pod \"keystone-db-sync-dkcpw\" (UID: \"3ae25f93-9fa1-42e0-8a13-984460bdd087\") " pod="openstack/keystone-db-sync-dkcpw" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.630366 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-4lgf6" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.634850 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-3cec-account-create-update-rnbdt"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.651010 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-4lgf6"] Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.664744 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-f9d3-account-create-update-9vvp4" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.776765 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjkjw\" (UniqueName: \"kubernetes.io/projected/be3a8314-c6a3-4c32-982e-47a36ea01821-kube-api-access-zjkjw\") pod \"neutron-3cec-account-create-update-rnbdt\" (UID: \"be3a8314-c6a3-4c32-982e-47a36ea01821\") " pod="openstack/neutron-3cec-account-create-update-rnbdt" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.777134 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be3a8314-c6a3-4c32-982e-47a36ea01821-operator-scripts\") pod \"neutron-3cec-account-create-update-rnbdt\" (UID: \"be3a8314-c6a3-4c32-982e-47a36ea01821\") " pod="openstack/neutron-3cec-account-create-update-rnbdt" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.777222 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12c2cbda-612f-4d50-a8ff-a4a893fd62ea-operator-scripts\") pod \"neutron-db-create-4lgf6\" (UID: \"12c2cbda-612f-4d50-a8ff-a4a893fd62ea\") " pod="openstack/neutron-db-create-4lgf6" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.777339 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h4l8\" (UniqueName: \"kubernetes.io/projected/12c2cbda-612f-4d50-a8ff-a4a893fd62ea-kube-api-access-8h4l8\") pod \"neutron-db-create-4lgf6\" (UID: \"12c2cbda-612f-4d50-a8ff-a4a893fd62ea\") " pod="openstack/neutron-db-create-4lgf6" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.813403 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-dkcpw" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.879246 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjkjw\" (UniqueName: \"kubernetes.io/projected/be3a8314-c6a3-4c32-982e-47a36ea01821-kube-api-access-zjkjw\") pod \"neutron-3cec-account-create-update-rnbdt\" (UID: \"be3a8314-c6a3-4c32-982e-47a36ea01821\") " pod="openstack/neutron-3cec-account-create-update-rnbdt" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.879757 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be3a8314-c6a3-4c32-982e-47a36ea01821-operator-scripts\") pod \"neutron-3cec-account-create-update-rnbdt\" (UID: \"be3a8314-c6a3-4c32-982e-47a36ea01821\") " pod="openstack/neutron-3cec-account-create-update-rnbdt" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.880672 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be3a8314-c6a3-4c32-982e-47a36ea01821-operator-scripts\") pod \"neutron-3cec-account-create-update-rnbdt\" (UID: \"be3a8314-c6a3-4c32-982e-47a36ea01821\") " pod="openstack/neutron-3cec-account-create-update-rnbdt" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.880749 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12c2cbda-612f-4d50-a8ff-a4a893fd62ea-operator-scripts\") pod \"neutron-db-create-4lgf6\" (UID: \"12c2cbda-612f-4d50-a8ff-a4a893fd62ea\") " pod="openstack/neutron-db-create-4lgf6" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.881550 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12c2cbda-612f-4d50-a8ff-a4a893fd62ea-operator-scripts\") pod \"neutron-db-create-4lgf6\" (UID: \"12c2cbda-612f-4d50-a8ff-a4a893fd62ea\") " pod="openstack/neutron-db-create-4lgf6" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.881620 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h4l8\" (UniqueName: \"kubernetes.io/projected/12c2cbda-612f-4d50-a8ff-a4a893fd62ea-kube-api-access-8h4l8\") pod \"neutron-db-create-4lgf6\" (UID: \"12c2cbda-612f-4d50-a8ff-a4a893fd62ea\") " pod="openstack/neutron-db-create-4lgf6" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.897791 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjkjw\" (UniqueName: \"kubernetes.io/projected/be3a8314-c6a3-4c32-982e-47a36ea01821-kube-api-access-zjkjw\") pod \"neutron-3cec-account-create-update-rnbdt\" (UID: \"be3a8314-c6a3-4c32-982e-47a36ea01821\") " pod="openstack/neutron-3cec-account-create-update-rnbdt" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.899091 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h4l8\" (UniqueName: \"kubernetes.io/projected/12c2cbda-612f-4d50-a8ff-a4a893fd62ea-kube-api-access-8h4l8\") pod \"neutron-db-create-4lgf6\" (UID: \"12c2cbda-612f-4d50-a8ff-a4a893fd62ea\") " pod="openstack/neutron-db-create-4lgf6" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.917690 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-3cec-account-create-update-rnbdt" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.936065 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" event={"ID":"c84209aa-144f-4082-88b0-c83eb7e57f24","Type":"ContainerStarted","Data":"8889cdc2d76061e9766b819c8df825c58beba2f2de8eb27333ea5d2840bb576f"} Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.936520 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.959662 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" podStartSLOduration=2.959645101 podStartE2EDuration="2.959645101s" podCreationTimestamp="2025-11-26 14:39:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:39:51.955219832 +0000 UTC m=+1458.751990026" watchObservedRunningTime="2025-11-26 14:39:51.959645101 +0000 UTC m=+1458.756415285" Nov 26 14:39:51 crc kubenswrapper[5037]: I1126 14:39:51.998528 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-4j78w"] Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.009952 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-f5wmh"] Nov 26 14:39:52 crc kubenswrapper[5037]: W1126 14:39:52.015755 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod70ae4ec3_44f5_4978_ba4a_31d762f0d748.slice/crio-4de7767372690560e77e83c48cb2a9d866653a33a4760f17d890504ed73a7037 WatchSource:0}: Error finding container 4de7767372690560e77e83c48cb2a9d866653a33a4760f17d890504ed73a7037: Status 404 returned error can't find the container with id 4de7767372690560e77e83c48cb2a9d866653a33a4760f17d890504ed73a7037 Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.048893 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-f9d3-account-create-update-9vvp4"] Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.080066 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-4lgf6" Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.123105 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-c68b-account-create-update-8xxjl"] Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.169537 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-dkcpw"] Nov 26 14:39:52 crc kubenswrapper[5037]: W1126 14:39:52.178266 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ae25f93_9fa1_42e0_8a13_984460bdd087.slice/crio-20e94b4f9f734ea293ac6b6d5a80689ab8f77dcae2a4f9da1a2adef80d61c7df WatchSource:0}: Error finding container 20e94b4f9f734ea293ac6b6d5a80689ab8f77dcae2a4f9da1a2adef80d61c7df: Status 404 returned error can't find the container with id 20e94b4f9f734ea293ac6b6d5a80689ab8f77dcae2a4f9da1a2adef80d61c7df Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.500578 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-3cec-account-create-update-rnbdt"] Nov 26 14:39:52 crc kubenswrapper[5037]: W1126 14:39:52.501252 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe3a8314_c6a3_4c32_982e_47a36ea01821.slice/crio-eb19a8a03aeafbb4e30180cf818d4683b8db86e54382ee0ba1ca1d310441f7db WatchSource:0}: Error finding container eb19a8a03aeafbb4e30180cf818d4683b8db86e54382ee0ba1ca1d310441f7db: Status 404 returned error can't find the container with id eb19a8a03aeafbb4e30180cf818d4683b8db86e54382ee0ba1ca1d310441f7db Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.651611 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-4lgf6"] Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.946643 5037 generic.go:334] "Generic (PLEG): container finished" podID="29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e" containerID="012cb3b03b0ecb9c337fe08575cd3c4a80bdc4a9dd8c07213bf0de31477aa103" exitCode=0 Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.946981 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c68b-account-create-update-8xxjl" event={"ID":"29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e","Type":"ContainerDied","Data":"012cb3b03b0ecb9c337fe08575cd3c4a80bdc4a9dd8c07213bf0de31477aa103"} Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.947012 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c68b-account-create-update-8xxjl" event={"ID":"29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e","Type":"ContainerStarted","Data":"8e10d79fa16a36098ab92179f33dafb5faf70c5d610a5b8c99d4182890acc245"} Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.949389 5037 generic.go:334] "Generic (PLEG): container finished" podID="791704a5-2365-4a7a-9cb5-5512a543aab2" containerID="3a6b281560c42c3b10b3817d2b216fece39436338f5a606eb423a1727282ed62" exitCode=0 Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.949461 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-f5wmh" event={"ID":"791704a5-2365-4a7a-9cb5-5512a543aab2","Type":"ContainerDied","Data":"3a6b281560c42c3b10b3817d2b216fece39436338f5a606eb423a1727282ed62"} Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.949487 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-f5wmh" 
event={"ID":"791704a5-2365-4a7a-9cb5-5512a543aab2","Type":"ContainerStarted","Data":"716af06c41c4c88cdb47db55290f4f659bf2c2ee47953c3af963b6e9654c73d5"} Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.953709 5037 generic.go:334] "Generic (PLEG): container finished" podID="70ae4ec3-44f5-4978-ba4a-31d762f0d748" containerID="941fc9a3959ee8002aab79da103b04201cca95b57887065e0bd2e9bc035ac27b" exitCode=0 Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.953772 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-4j78w" event={"ID":"70ae4ec3-44f5-4978-ba4a-31d762f0d748","Type":"ContainerDied","Data":"941fc9a3959ee8002aab79da103b04201cca95b57887065e0bd2e9bc035ac27b"} Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.953797 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-4j78w" event={"ID":"70ae4ec3-44f5-4978-ba4a-31d762f0d748","Type":"ContainerStarted","Data":"4de7767372690560e77e83c48cb2a9d866653a33a4760f17d890504ed73a7037"} Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.957977 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3cec-account-create-update-rnbdt" event={"ID":"be3a8314-c6a3-4c32-982e-47a36ea01821","Type":"ContainerStarted","Data":"eb19a8a03aeafbb4e30180cf818d4683b8db86e54382ee0ba1ca1d310441f7db"} Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.967185 5037 generic.go:334] "Generic (PLEG): container finished" podID="9338ce30-fd3e-4fa1-bbc3-5f73cbba9662" containerID="743bc6d2ce27587012763710fbf1732a386ed06688e5f64f97de51cecbdae318" exitCode=0 Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.967261 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f9d3-account-create-update-9vvp4" event={"ID":"9338ce30-fd3e-4fa1-bbc3-5f73cbba9662","Type":"ContainerDied","Data":"743bc6d2ce27587012763710fbf1732a386ed06688e5f64f97de51cecbdae318"} Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.967329 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f9d3-account-create-update-9vvp4" event={"ID":"9338ce30-fd3e-4fa1-bbc3-5f73cbba9662","Type":"ContainerStarted","Data":"093681df3a980c6a2d969d9b3fd15e8921dfc3df36b962f51a77c17388e97d69"} Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.969710 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-dkcpw" event={"ID":"3ae25f93-9fa1-42e0-8a13-984460bdd087","Type":"ContainerStarted","Data":"20e94b4f9f734ea293ac6b6d5a80689ab8f77dcae2a4f9da1a2adef80d61c7df"} Nov 26 14:39:52 crc kubenswrapper[5037]: I1126 14:39:52.972512 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-4lgf6" event={"ID":"12c2cbda-612f-4d50-a8ff-a4a893fd62ea","Type":"ContainerStarted","Data":"a52b077ea3ab8591e1c70efa094bc12871b96c9e846dc335178af1adc54c232e"} Nov 26 14:39:53 crc kubenswrapper[5037]: I1126 14:39:53.985077 5037 generic.go:334] "Generic (PLEG): container finished" podID="be3a8314-c6a3-4c32-982e-47a36ea01821" containerID="28fcb153066df814b94c4891251d6d55dbcb4934ca8da295cafa2d5cddb4b9cf" exitCode=0 Nov 26 14:39:53 crc kubenswrapper[5037]: I1126 14:39:53.985148 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3cec-account-create-update-rnbdt" event={"ID":"be3a8314-c6a3-4c32-982e-47a36ea01821","Type":"ContainerDied","Data":"28fcb153066df814b94c4891251d6d55dbcb4934ca8da295cafa2d5cddb4b9cf"} Nov 26 14:39:53 crc kubenswrapper[5037]: I1126 14:39:53.989181 5037 generic.go:334] "Generic 
(PLEG): container finished" podID="12c2cbda-612f-4d50-a8ff-a4a893fd62ea" containerID="14aebf93316a419a26ed225520cf8463cecfed50b9d7d41f74af43e5f4d6d686" exitCode=0 Nov 26 14:39:53 crc kubenswrapper[5037]: I1126 14:39:53.989351 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-4lgf6" event={"ID":"12c2cbda-612f-4d50-a8ff-a4a893fd62ea","Type":"ContainerDied","Data":"14aebf93316a419a26ed225520cf8463cecfed50b9d7d41f74af43e5f4d6d686"} Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.398801 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f9d3-account-create-update-9vvp4" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.444110 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c68b-account-create-update-8xxjl" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.532964 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hz54\" (UniqueName: \"kubernetes.io/projected/9338ce30-fd3e-4fa1-bbc3-5f73cbba9662-kube-api-access-6hz54\") pod \"9338ce30-fd3e-4fa1-bbc3-5f73cbba9662\" (UID: \"9338ce30-fd3e-4fa1-bbc3-5f73cbba9662\") " Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.533153 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9338ce30-fd3e-4fa1-bbc3-5f73cbba9662-operator-scripts\") pod \"9338ce30-fd3e-4fa1-bbc3-5f73cbba9662\" (UID: \"9338ce30-fd3e-4fa1-bbc3-5f73cbba9662\") " Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.534036 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9338ce30-fd3e-4fa1-bbc3-5f73cbba9662-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9338ce30-fd3e-4fa1-bbc3-5f73cbba9662" (UID: "9338ce30-fd3e-4fa1-bbc3-5f73cbba9662"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.536911 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9338ce30-fd3e-4fa1-bbc3-5f73cbba9662-kube-api-access-6hz54" (OuterVolumeSpecName: "kube-api-access-6hz54") pod "9338ce30-fd3e-4fa1-bbc3-5f73cbba9662" (UID: "9338ce30-fd3e-4fa1-bbc3-5f73cbba9662"). InnerVolumeSpecName "kube-api-access-6hz54". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.635271 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pttdj\" (UniqueName: \"kubernetes.io/projected/29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e-kube-api-access-pttdj\") pod \"29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e\" (UID: \"29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e\") " Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.635480 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e-operator-scripts\") pod \"29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e\" (UID: \"29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e\") " Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.635846 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9338ce30-fd3e-4fa1-bbc3-5f73cbba9662-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.635862 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hz54\" (UniqueName: \"kubernetes.io/projected/9338ce30-fd3e-4fa1-bbc3-5f73cbba9662-kube-api-access-6hz54\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.636067 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e" (UID: "29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.638764 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e-kube-api-access-pttdj" (OuterVolumeSpecName: "kube-api-access-pttdj") pod "29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e" (UID: "29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e"). InnerVolumeSpecName "kube-api-access-pttdj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.737339 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pttdj\" (UniqueName: \"kubernetes.io/projected/29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e-kube-api-access-pttdj\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:54.737377 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:55.003571 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f9d3-account-create-update-9vvp4" event={"ID":"9338ce30-fd3e-4fa1-bbc3-5f73cbba9662","Type":"ContainerDied","Data":"093681df3a980c6a2d969d9b3fd15e8921dfc3df36b962f51a77c17388e97d69"} Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:55.003616 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="093681df3a980c6a2d969d9b3fd15e8921dfc3df36b962f51a77c17388e97d69" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:55.003580 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-f9d3-account-create-update-9vvp4" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:55.007279 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c68b-account-create-update-8xxjl" event={"ID":"29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e","Type":"ContainerDied","Data":"8e10d79fa16a36098ab92179f33dafb5faf70c5d610a5b8c99d4182890acc245"} Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:55.007393 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e10d79fa16a36098ab92179f33dafb5faf70c5d610a5b8c99d4182890acc245" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:55.007448 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c68b-account-create-update-8xxjl" Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:55.011049 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-mgn9v" event={"ID":"2369115c-ae08-42b0-af64-c42191c04502","Type":"ContainerStarted","Data":"cc6733dda97a3c0d2ee7075a71f1b7520ad8f6875df7af6d1a7a9513450a7825"} Nov 26 14:39:55 crc kubenswrapper[5037]: I1126 14:39:55.435046 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-mgn9v" podStartSLOduration=2.476571487 podStartE2EDuration="31.435010496s" podCreationTimestamp="2025-11-26 14:39:24 +0000 UTC" firstStartedPulling="2025-11-26 14:39:25.4241198 +0000 UTC m=+1432.220889984" lastFinishedPulling="2025-11-26 14:39:54.382558809 +0000 UTC m=+1461.179328993" observedRunningTime="2025-11-26 14:39:55.03242483 +0000 UTC m=+1461.829195014" watchObservedRunningTime="2025-11-26 14:39:55.435010496 +0000 UTC m=+1462.231780710" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.596951 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-f5wmh" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.604593 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3cec-account-create-update-rnbdt" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.623656 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-4lgf6" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.644755 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-4j78w" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.791633 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjkjw\" (UniqueName: \"kubernetes.io/projected/be3a8314-c6a3-4c32-982e-47a36ea01821-kube-api-access-zjkjw\") pod \"be3a8314-c6a3-4c32-982e-47a36ea01821\" (UID: \"be3a8314-c6a3-4c32-982e-47a36ea01821\") " Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.791740 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/791704a5-2365-4a7a-9cb5-5512a543aab2-operator-scripts\") pod \"791704a5-2365-4a7a-9cb5-5512a543aab2\" (UID: \"791704a5-2365-4a7a-9cb5-5512a543aab2\") " Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.791805 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70ae4ec3-44f5-4978-ba4a-31d762f0d748-operator-scripts\") pod \"70ae4ec3-44f5-4978-ba4a-31d762f0d748\" (UID: \"70ae4ec3-44f5-4978-ba4a-31d762f0d748\") " Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.791966 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be3a8314-c6a3-4c32-982e-47a36ea01821-operator-scripts\") pod \"be3a8314-c6a3-4c32-982e-47a36ea01821\" (UID: \"be3a8314-c6a3-4c32-982e-47a36ea01821\") " Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.792041 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12c2cbda-612f-4d50-a8ff-a4a893fd62ea-operator-scripts\") pod \"12c2cbda-612f-4d50-a8ff-a4a893fd62ea\" (UID: \"12c2cbda-612f-4d50-a8ff-a4a893fd62ea\") " Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.792104 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8h4l8\" (UniqueName: \"kubernetes.io/projected/12c2cbda-612f-4d50-a8ff-a4a893fd62ea-kube-api-access-8h4l8\") pod \"12c2cbda-612f-4d50-a8ff-a4a893fd62ea\" (UID: \"12c2cbda-612f-4d50-a8ff-a4a893fd62ea\") " Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.792199 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmrr4\" (UniqueName: \"kubernetes.io/projected/70ae4ec3-44f5-4978-ba4a-31d762f0d748-kube-api-access-fmrr4\") pod \"70ae4ec3-44f5-4978-ba4a-31d762f0d748\" (UID: \"70ae4ec3-44f5-4978-ba4a-31d762f0d748\") " Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.792248 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcwcs\" (UniqueName: \"kubernetes.io/projected/791704a5-2365-4a7a-9cb5-5512a543aab2-kube-api-access-pcwcs\") pod \"791704a5-2365-4a7a-9cb5-5512a543aab2\" (UID: \"791704a5-2365-4a7a-9cb5-5512a543aab2\") " Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.794746 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70ae4ec3-44f5-4978-ba4a-31d762f0d748-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "70ae4ec3-44f5-4978-ba4a-31d762f0d748" (UID: "70ae4ec3-44f5-4978-ba4a-31d762f0d748"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.794851 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12c2cbda-612f-4d50-a8ff-a4a893fd62ea-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "12c2cbda-612f-4d50-a8ff-a4a893fd62ea" (UID: "12c2cbda-612f-4d50-a8ff-a4a893fd62ea"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.794891 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be3a8314-c6a3-4c32-982e-47a36ea01821-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "be3a8314-c6a3-4c32-982e-47a36ea01821" (UID: "be3a8314-c6a3-4c32-982e-47a36ea01821"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.795175 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/791704a5-2365-4a7a-9cb5-5512a543aab2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "791704a5-2365-4a7a-9cb5-5512a543aab2" (UID: "791704a5-2365-4a7a-9cb5-5512a543aab2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.798532 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/791704a5-2365-4a7a-9cb5-5512a543aab2-kube-api-access-pcwcs" (OuterVolumeSpecName: "kube-api-access-pcwcs") pod "791704a5-2365-4a7a-9cb5-5512a543aab2" (UID: "791704a5-2365-4a7a-9cb5-5512a543aab2"). InnerVolumeSpecName "kube-api-access-pcwcs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.798913 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70ae4ec3-44f5-4978-ba4a-31d762f0d748-kube-api-access-fmrr4" (OuterVolumeSpecName: "kube-api-access-fmrr4") pod "70ae4ec3-44f5-4978-ba4a-31d762f0d748" (UID: "70ae4ec3-44f5-4978-ba4a-31d762f0d748"). InnerVolumeSpecName "kube-api-access-fmrr4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.799201 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12c2cbda-612f-4d50-a8ff-a4a893fd62ea-kube-api-access-8h4l8" (OuterVolumeSpecName: "kube-api-access-8h4l8") pod "12c2cbda-612f-4d50-a8ff-a4a893fd62ea" (UID: "12c2cbda-612f-4d50-a8ff-a4a893fd62ea"). InnerVolumeSpecName "kube-api-access-8h4l8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.807436 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be3a8314-c6a3-4c32-982e-47a36ea01821-kube-api-access-zjkjw" (OuterVolumeSpecName: "kube-api-access-zjkjw") pod "be3a8314-c6a3-4c32-982e-47a36ea01821" (UID: "be3a8314-c6a3-4c32-982e-47a36ea01821"). InnerVolumeSpecName "kube-api-access-zjkjw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.894098 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8h4l8\" (UniqueName: \"kubernetes.io/projected/12c2cbda-612f-4d50-a8ff-a4a893fd62ea-kube-api-access-8h4l8\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.894339 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmrr4\" (UniqueName: \"kubernetes.io/projected/70ae4ec3-44f5-4978-ba4a-31d762f0d748-kube-api-access-fmrr4\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.894349 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcwcs\" (UniqueName: \"kubernetes.io/projected/791704a5-2365-4a7a-9cb5-5512a543aab2-kube-api-access-pcwcs\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.894358 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjkjw\" (UniqueName: \"kubernetes.io/projected/be3a8314-c6a3-4c32-982e-47a36ea01821-kube-api-access-zjkjw\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.894369 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/791704a5-2365-4a7a-9cb5-5512a543aab2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.894380 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70ae4ec3-44f5-4978-ba4a-31d762f0d748-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.894388 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be3a8314-c6a3-4c32-982e-47a36ea01821-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:57 crc kubenswrapper[5037]: I1126 14:39:57.894397 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12c2cbda-612f-4d50-a8ff-a4a893fd62ea-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.036325 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-4lgf6" event={"ID":"12c2cbda-612f-4d50-a8ff-a4a893fd62ea","Type":"ContainerDied","Data":"a52b077ea3ab8591e1c70efa094bc12871b96c9e846dc335178af1adc54c232e"} Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.036380 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a52b077ea3ab8591e1c70efa094bc12871b96c9e846dc335178af1adc54c232e" Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.036455 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-4lgf6" Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.039181 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-f5wmh" event={"ID":"791704a5-2365-4a7a-9cb5-5512a543aab2","Type":"ContainerDied","Data":"716af06c41c4c88cdb47db55290f4f659bf2c2ee47953c3af963b6e9654c73d5"} Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.039225 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="716af06c41c4c88cdb47db55290f4f659bf2c2ee47953c3af963b6e9654c73d5" Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.039308 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-f5wmh" Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.044784 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-4j78w" event={"ID":"70ae4ec3-44f5-4978-ba4a-31d762f0d748","Type":"ContainerDied","Data":"4de7767372690560e77e83c48cb2a9d866653a33a4760f17d890504ed73a7037"} Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.044894 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4de7767372690560e77e83c48cb2a9d866653a33a4760f17d890504ed73a7037" Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.045087 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-4j78w" Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.051940 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3cec-account-create-update-rnbdt" event={"ID":"be3a8314-c6a3-4c32-982e-47a36ea01821","Type":"ContainerDied","Data":"eb19a8a03aeafbb4e30180cf818d4683b8db86e54382ee0ba1ca1d310441f7db"} Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.051979 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb19a8a03aeafbb4e30180cf818d4683b8db86e54382ee0ba1ca1d310441f7db" Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.052024 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-3cec-account-create-update-rnbdt" Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.056744 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-dkcpw" event={"ID":"3ae25f93-9fa1-42e0-8a13-984460bdd087","Type":"ContainerStarted","Data":"c290f83dd69d9c3a68537baf518675b47c29941befa9a3c4b26cc953e62cd55a"} Nov 26 14:39:58 crc kubenswrapper[5037]: I1126 14:39:58.081719 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-dkcpw" podStartSLOduration=1.8156008529999998 podStartE2EDuration="7.081693166s" podCreationTimestamp="2025-11-26 14:39:51 +0000 UTC" firstStartedPulling="2025-11-26 14:39:52.184639412 +0000 UTC m=+1458.981409596" lastFinishedPulling="2025-11-26 14:39:57.450731715 +0000 UTC m=+1464.247501909" observedRunningTime="2025-11-26 14:39:58.076026368 +0000 UTC m=+1464.872796602" watchObservedRunningTime="2025-11-26 14:39:58.081693166 +0000 UTC m=+1464.878463360" Nov 26 14:39:59 crc kubenswrapper[5037]: I1126 14:39:59.544465 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:39:59 crc kubenswrapper[5037]: I1126 14:39:59.618153 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-5cc8r"] Nov 26 14:39:59 crc kubenswrapper[5037]: I1126 14:39:59.618759 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" podUID="8e71c09e-69be-4196-beaf-e34c6b9880bb" containerName="dnsmasq-dns" containerID="cri-o://4b1a296a4b64571a6a460f1df382d22957f75c8bfc4cd8ab7cdb5659c53a0d9e" gracePeriod=10 Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.081518 5037 generic.go:334] "Generic (PLEG): container finished" podID="8e71c09e-69be-4196-beaf-e34c6b9880bb" containerID="4b1a296a4b64571a6a460f1df382d22957f75c8bfc4cd8ab7cdb5659c53a0d9e" exitCode=0 Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.081573 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" event={"ID":"8e71c09e-69be-4196-beaf-e34c6b9880bb","Type":"ContainerDied","Data":"4b1a296a4b64571a6a460f1df382d22957f75c8bfc4cd8ab7cdb5659c53a0d9e"} Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.210847 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.371166 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4k6ml\" (UniqueName: \"kubernetes.io/projected/8e71c09e-69be-4196-beaf-e34c6b9880bb-kube-api-access-4k6ml\") pod \"8e71c09e-69be-4196-beaf-e34c6b9880bb\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.371218 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-ovsdbserver-sb\") pod \"8e71c09e-69be-4196-beaf-e34c6b9880bb\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.371321 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-config\") pod \"8e71c09e-69be-4196-beaf-e34c6b9880bb\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.371377 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-dns-svc\") pod \"8e71c09e-69be-4196-beaf-e34c6b9880bb\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.371437 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-ovsdbserver-nb\") pod \"8e71c09e-69be-4196-beaf-e34c6b9880bb\" (UID: \"8e71c09e-69be-4196-beaf-e34c6b9880bb\") " Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.376982 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e71c09e-69be-4196-beaf-e34c6b9880bb-kube-api-access-4k6ml" (OuterVolumeSpecName: "kube-api-access-4k6ml") pod "8e71c09e-69be-4196-beaf-e34c6b9880bb" (UID: "8e71c09e-69be-4196-beaf-e34c6b9880bb"). InnerVolumeSpecName "kube-api-access-4k6ml". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.414597 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8e71c09e-69be-4196-beaf-e34c6b9880bb" (UID: "8e71c09e-69be-4196-beaf-e34c6b9880bb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.415172 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-config" (OuterVolumeSpecName: "config") pod "8e71c09e-69be-4196-beaf-e34c6b9880bb" (UID: "8e71c09e-69be-4196-beaf-e34c6b9880bb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.419665 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8e71c09e-69be-4196-beaf-e34c6b9880bb" (UID: "8e71c09e-69be-4196-beaf-e34c6b9880bb"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.436678 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8e71c09e-69be-4196-beaf-e34c6b9880bb" (UID: "8e71c09e-69be-4196-beaf-e34c6b9880bb"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.473078 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.473115 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.473129 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4k6ml\" (UniqueName: \"kubernetes.io/projected/8e71c09e-69be-4196-beaf-e34c6b9880bb-kube-api-access-4k6ml\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.473140 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:00 crc kubenswrapper[5037]: I1126 14:40:00.473148 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e71c09e-69be-4196-beaf-e34c6b9880bb-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:01 crc kubenswrapper[5037]: I1126 14:40:01.101622 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" Nov 26 14:40:01 crc kubenswrapper[5037]: I1126 14:40:01.101529 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-5cc8r" event={"ID":"8e71c09e-69be-4196-beaf-e34c6b9880bb","Type":"ContainerDied","Data":"1a564c4f5c87a1dcee66a028b0c284d71116e15e700b1fbb2544140640352a8b"} Nov 26 14:40:01 crc kubenswrapper[5037]: I1126 14:40:01.106373 5037 scope.go:117] "RemoveContainer" containerID="4b1a296a4b64571a6a460f1df382d22957f75c8bfc4cd8ab7cdb5659c53a0d9e" Nov 26 14:40:01 crc kubenswrapper[5037]: I1126 14:40:01.128794 5037 scope.go:117] "RemoveContainer" containerID="cce77c2d47eb6c8a42167b67fa100f6a46dae5339817afbff1660e9604e478b8" Nov 26 14:40:01 crc kubenswrapper[5037]: I1126 14:40:01.160320 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-5cc8r"] Nov 26 14:40:01 crc kubenswrapper[5037]: I1126 14:40:01.174583 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-5cc8r"] Nov 26 14:40:01 crc kubenswrapper[5037]: I1126 14:40:01.919205 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e71c09e-69be-4196-beaf-e34c6b9880bb" path="/var/lib/kubelet/pods/8e71c09e-69be-4196-beaf-e34c6b9880bb/volumes" Nov 26 14:40:03 crc kubenswrapper[5037]: I1126 14:40:03.124745 5037 generic.go:334] "Generic (PLEG): container finished" podID="3ae25f93-9fa1-42e0-8a13-984460bdd087" containerID="c290f83dd69d9c3a68537baf518675b47c29941befa9a3c4b26cc953e62cd55a" exitCode=0 Nov 26 14:40:03 crc kubenswrapper[5037]: I1126 14:40:03.124859 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-dkcpw" event={"ID":"3ae25f93-9fa1-42e0-8a13-984460bdd087","Type":"ContainerDied","Data":"c290f83dd69d9c3a68537baf518675b47c29941befa9a3c4b26cc953e62cd55a"} Nov 26 14:40:04 crc kubenswrapper[5037]: I1126 14:40:04.138796 5037 generic.go:334] "Generic (PLEG): container finished" podID="2369115c-ae08-42b0-af64-c42191c04502" containerID="cc6733dda97a3c0d2ee7075a71f1b7520ad8f6875df7af6d1a7a9513450a7825" exitCode=0 Nov 26 14:40:04 crc kubenswrapper[5037]: I1126 14:40:04.139024 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-mgn9v" event={"ID":"2369115c-ae08-42b0-af64-c42191c04502","Type":"ContainerDied","Data":"cc6733dda97a3c0d2ee7075a71f1b7520ad8f6875df7af6d1a7a9513450a7825"} Nov 26 14:40:04 crc kubenswrapper[5037]: I1126 14:40:04.411389 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-dkcpw" Nov 26 14:40:04 crc kubenswrapper[5037]: I1126 14:40:04.432551 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmm7t\" (UniqueName: \"kubernetes.io/projected/3ae25f93-9fa1-42e0-8a13-984460bdd087-kube-api-access-bmm7t\") pod \"3ae25f93-9fa1-42e0-8a13-984460bdd087\" (UID: \"3ae25f93-9fa1-42e0-8a13-984460bdd087\") " Nov 26 14:40:04 crc kubenswrapper[5037]: I1126 14:40:04.432612 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ae25f93-9fa1-42e0-8a13-984460bdd087-config-data\") pod \"3ae25f93-9fa1-42e0-8a13-984460bdd087\" (UID: \"3ae25f93-9fa1-42e0-8a13-984460bdd087\") " Nov 26 14:40:04 crc kubenswrapper[5037]: I1126 14:40:04.432655 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ae25f93-9fa1-42e0-8a13-984460bdd087-combined-ca-bundle\") pod \"3ae25f93-9fa1-42e0-8a13-984460bdd087\" (UID: \"3ae25f93-9fa1-42e0-8a13-984460bdd087\") " Nov 26 14:40:04 crc kubenswrapper[5037]: I1126 14:40:04.439783 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ae25f93-9fa1-42e0-8a13-984460bdd087-kube-api-access-bmm7t" (OuterVolumeSpecName: "kube-api-access-bmm7t") pod "3ae25f93-9fa1-42e0-8a13-984460bdd087" (UID: "3ae25f93-9fa1-42e0-8a13-984460bdd087"). InnerVolumeSpecName "kube-api-access-bmm7t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:04 crc kubenswrapper[5037]: I1126 14:40:04.473990 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ae25f93-9fa1-42e0-8a13-984460bdd087-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ae25f93-9fa1-42e0-8a13-984460bdd087" (UID: "3ae25f93-9fa1-42e0-8a13-984460bdd087"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:04 crc kubenswrapper[5037]: I1126 14:40:04.484928 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ae25f93-9fa1-42e0-8a13-984460bdd087-config-data" (OuterVolumeSpecName: "config-data") pod "3ae25f93-9fa1-42e0-8a13-984460bdd087" (UID: "3ae25f93-9fa1-42e0-8a13-984460bdd087"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:04 crc kubenswrapper[5037]: I1126 14:40:04.535827 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmm7t\" (UniqueName: \"kubernetes.io/projected/3ae25f93-9fa1-42e0-8a13-984460bdd087-kube-api-access-bmm7t\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:04 crc kubenswrapper[5037]: I1126 14:40:04.535861 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ae25f93-9fa1-42e0-8a13-984460bdd087-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:04 crc kubenswrapper[5037]: I1126 14:40:04.535873 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ae25f93-9fa1-42e0-8a13-984460bdd087-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.155354 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-dkcpw" event={"ID":"3ae25f93-9fa1-42e0-8a13-984460bdd087","Type":"ContainerDied","Data":"20e94b4f9f734ea293ac6b6d5a80689ab8f77dcae2a4f9da1a2adef80d61c7df"} Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.155754 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20e94b4f9f734ea293ac6b6d5a80689ab8f77dcae2a4f9da1a2adef80d61c7df" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.155455 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-dkcpw" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.352619 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-zw224"] Nov 26 14:40:05 crc kubenswrapper[5037]: E1126 14:40:05.358510 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e71c09e-69be-4196-beaf-e34c6b9880bb" containerName="dnsmasq-dns" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.358549 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e71c09e-69be-4196-beaf-e34c6b9880bb" containerName="dnsmasq-dns" Nov 26 14:40:05 crc kubenswrapper[5037]: E1126 14:40:05.358562 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="791704a5-2365-4a7a-9cb5-5512a543aab2" containerName="mariadb-database-create" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.358570 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="791704a5-2365-4a7a-9cb5-5512a543aab2" containerName="mariadb-database-create" Nov 26 14:40:05 crc kubenswrapper[5037]: E1126 14:40:05.358583 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ae25f93-9fa1-42e0-8a13-984460bdd087" containerName="keystone-db-sync" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.358594 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ae25f93-9fa1-42e0-8a13-984460bdd087" containerName="keystone-db-sync" Nov 26 14:40:05 crc kubenswrapper[5037]: E1126 14:40:05.358612 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70ae4ec3-44f5-4978-ba4a-31d762f0d748" containerName="mariadb-database-create" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.358620 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="70ae4ec3-44f5-4978-ba4a-31d762f0d748" containerName="mariadb-database-create" Nov 26 14:40:05 crc kubenswrapper[5037]: E1126 14:40:05.358637 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e71c09e-69be-4196-beaf-e34c6b9880bb" containerName="init" Nov 26 
14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.358645 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e71c09e-69be-4196-beaf-e34c6b9880bb" containerName="init" Nov 26 14:40:05 crc kubenswrapper[5037]: E1126 14:40:05.358657 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be3a8314-c6a3-4c32-982e-47a36ea01821" containerName="mariadb-account-create-update" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.358666 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="be3a8314-c6a3-4c32-982e-47a36ea01821" containerName="mariadb-account-create-update" Nov 26 14:40:05 crc kubenswrapper[5037]: E1126 14:40:05.358677 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9338ce30-fd3e-4fa1-bbc3-5f73cbba9662" containerName="mariadb-account-create-update" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.358684 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9338ce30-fd3e-4fa1-bbc3-5f73cbba9662" containerName="mariadb-account-create-update" Nov 26 14:40:05 crc kubenswrapper[5037]: E1126 14:40:05.358699 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e" containerName="mariadb-account-create-update" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.358707 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e" containerName="mariadb-account-create-update" Nov 26 14:40:05 crc kubenswrapper[5037]: E1126 14:40:05.358730 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12c2cbda-612f-4d50-a8ff-a4a893fd62ea" containerName="mariadb-database-create" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.358738 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="12c2cbda-612f-4d50-a8ff-a4a893fd62ea" containerName="mariadb-database-create" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.358938 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="791704a5-2365-4a7a-9cb5-5512a543aab2" containerName="mariadb-database-create" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.358957 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ae25f93-9fa1-42e0-8a13-984460bdd087" containerName="keystone-db-sync" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.358976 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e" containerName="mariadb-account-create-update" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.358990 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="70ae4ec3-44f5-4978-ba4a-31d762f0d748" containerName="mariadb-database-create" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.359004 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="be3a8314-c6a3-4c32-982e-47a36ea01821" containerName="mariadb-account-create-update" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.359011 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="9338ce30-fd3e-4fa1-bbc3-5f73cbba9662" containerName="mariadb-account-create-update" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.359033 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="12c2cbda-612f-4d50-a8ff-a4a893fd62ea" containerName="mariadb-database-create" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.359047 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e71c09e-69be-4196-beaf-e34c6b9880bb" containerName="dnsmasq-dns" Nov 
26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.359784 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.367930 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.368072 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-fpv7w" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.368236 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.368298 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.368504 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.370321 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-zw224"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.382919 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8c7bdb785-dx2kh"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.387835 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.424004 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8c7bdb785-dx2kh"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.553490 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-credential-keys\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.553554 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-dns-swift-storage-0\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.553588 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-ovsdbserver-sb\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.553609 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpj4n\" (UniqueName: \"kubernetes.io/projected/0421669a-d794-43a0-81c7-8d5ee04afa07-kube-api-access-bpj4n\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.553668 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-ovsdbserver-nb\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.553713 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-config-data\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.553744 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-scripts\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.553768 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-dns-svc\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.553814 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-config\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.553848 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-combined-ca-bundle\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.553901 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-fernet-keys\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.553940 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwlwv\" (UniqueName: \"kubernetes.io/projected/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-kube-api-access-vwlwv\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.576415 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-jxxp2"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.577757 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.580202 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.582708 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-784g4" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.582882 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.654985 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-config\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.655044 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-combined-ca-bundle\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.655068 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-fernet-keys\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.655099 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwlwv\" (UniqueName: \"kubernetes.io/projected/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-kube-api-access-vwlwv\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.655137 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-credential-keys\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.655169 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-dns-swift-storage-0\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.655202 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-ovsdbserver-sb\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.655222 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpj4n\" (UniqueName: \"kubernetes.io/projected/0421669a-d794-43a0-81c7-8d5ee04afa07-kube-api-access-bpj4n\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" 
(UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.655266 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-ovsdbserver-nb\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.655331 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-config-data\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.655368 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-scripts\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.655397 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-dns-svc\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.662154 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-ovsdbserver-nb\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.671133 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-dns-svc\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.672881 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-dns-swift-storage-0\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.673680 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-config\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.685756 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-fernet-keys\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.686419 5037 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-config-data\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.688139 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-jxxp2"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.694917 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-ovsdbserver-sb\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.706495 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-78lfm"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.707881 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-scripts\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.708795 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-78lfm"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.708930 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-78lfm" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.712165 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-rt4gf" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.712790 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-credential-keys\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.718002 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-combined-ca-bundle\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.720267 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.720571 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.737355 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpj4n\" (UniqueName: \"kubernetes.io/projected/0421669a-d794-43a0-81c7-8d5ee04afa07-kube-api-access-bpj4n\") pod \"dnsmasq-dns-8c7bdb785-dx2kh\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.748186 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8c7bdb785-dx2kh"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.748826 5037 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.758406 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwlwv\" (UniqueName: \"kubernetes.io/projected/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-kube-api-access-vwlwv\") pod \"keystone-bootstrap-zw224\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.761787 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-scripts\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.761922 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-combined-ca-bundle\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.762004 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-config-data\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.762048 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-db-sync-config-data\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.762086 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jt7vt\" (UniqueName: \"kubernetes.io/projected/50b1873a-43ee-426d-99f2-84e8267cb178-kube-api-access-jt7vt\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.762185 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/50b1873a-43ee-426d-99f2-84e8267cb178-etc-machine-id\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.815148 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.826254 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.837814 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.838185 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.840550 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-gpxkh"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.847446 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.852617 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-pq7hs" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.853997 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.854588 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.881963 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76c8d5b9fc-gkgbg"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.889388 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/78b7adc8-c410-4ccf-948a-0d968e60d8b7-config\") pod \"neutron-db-sync-78lfm\" (UID: \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\") " pod="openstack/neutron-db-sync-78lfm" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.889420 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-scripts\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.889466 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-combined-ca-bundle\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.889502 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-config-data\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.889525 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmtj4\" (UniqueName: \"kubernetes.io/projected/78b7adc8-c410-4ccf-948a-0d968e60d8b7-kube-api-access-pmtj4\") pod \"neutron-db-sync-78lfm\" (UID: \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\") " pod="openstack/neutron-db-sync-78lfm" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.889546 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-db-sync-config-data\") pod 
\"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.889565 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jt7vt\" (UniqueName: \"kubernetes.io/projected/50b1873a-43ee-426d-99f2-84e8267cb178-kube-api-access-jt7vt\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.889583 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78b7adc8-c410-4ccf-948a-0d968e60d8b7-combined-ca-bundle\") pod \"neutron-db-sync-78lfm\" (UID: \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\") " pod="openstack/neutron-db-sync-78lfm" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.889815 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/50b1873a-43ee-426d-99f2-84e8267cb178-etc-machine-id\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.889885 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/50b1873a-43ee-426d-99f2-84e8267cb178-etc-machine-id\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.901732 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-config-data\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.901989 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-scripts\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.902041 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-combined-ca-bundle\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.904751 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.904849 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.907345 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-gpxkh"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.921097 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-db-sync-config-data\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.925976 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jt7vt\" (UniqueName: \"kubernetes.io/projected/50b1873a-43ee-426d-99f2-84e8267cb178-kube-api-access-jt7vt\") pod \"cinder-db-sync-jxxp2\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.937446 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76c8d5b9fc-gkgbg"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.948320 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-pmjr8"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.954651 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-pmjr8" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.959649 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-9lcbd" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.960442 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.964680 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-pmjr8"] Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.981482 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.990973 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/78b7adc8-c410-4ccf-948a-0d968e60d8b7-config\") pod \"neutron-db-sync-78lfm\" (UID: \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\") " pod="openstack/neutron-db-sync-78lfm" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991302 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zzhd\" (UniqueName: \"kubernetes.io/projected/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-kube-api-access-5zzhd\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991346 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-config-data\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991393 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-log-httpd\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991415 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-scripts\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991446 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-logs\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991465 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmtj4\" (UniqueName: \"kubernetes.io/projected/78b7adc8-c410-4ccf-948a-0d968e60d8b7-kube-api-access-pmtj4\") pod \"neutron-db-sync-78lfm\" (UID: \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\") " pod="openstack/neutron-db-sync-78lfm" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991499 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78b7adc8-c410-4ccf-948a-0d968e60d8b7-combined-ca-bundle\") pod \"neutron-db-sync-78lfm\" (UID: \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\") " pod="openstack/neutron-db-sync-78lfm" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991521 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991554 5037 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991578 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-scripts\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991594 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-run-httpd\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991611 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-combined-ca-bundle\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991636 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjx8d\" (UniqueName: \"kubernetes.io/projected/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-kube-api-access-zjx8d\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.991657 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-config-data\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.995832 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/78b7adc8-c410-4ccf-948a-0d968e60d8b7-config\") pod \"neutron-db-sync-78lfm\" (UID: \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\") " pod="openstack/neutron-db-sync-78lfm" Nov 26 14:40:05 crc kubenswrapper[5037]: I1126 14:40:05.996298 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78b7adc8-c410-4ccf-948a-0d968e60d8b7-combined-ca-bundle\") pod \"neutron-db-sync-78lfm\" (UID: \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\") " pod="openstack/neutron-db-sync-78lfm" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.016959 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmtj4\" (UniqueName: \"kubernetes.io/projected/78b7adc8-c410-4ccf-948a-0d968e60d8b7-kube-api-access-pmtj4\") pod \"neutron-db-sync-78lfm\" (UID: \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\") " pod="openstack/neutron-db-sync-78lfm" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.053061 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-mgn9v" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.093744 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-scripts\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.093799 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mlwm\" (UniqueName: \"kubernetes.io/projected/5141f29f-7b8b-493c-9d73-398f66ea4ab1-kube-api-access-9mlwm\") pod \"barbican-db-sync-pmjr8\" (UID: \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\") " pod="openstack/barbican-db-sync-pmjr8" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.093821 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-logs\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.093864 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.093900 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.093917 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4qfg\" (UniqueName: \"kubernetes.io/projected/dbe07020-c0c8-4d44-9bda-2683282d178d-kube-api-access-z4qfg\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.093952 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-dns-svc\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.093968 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-scripts\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.093982 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-run-httpd\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.093996 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-combined-ca-bundle\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.094013 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-ovsdbserver-sb\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.094030 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-dns-swift-storage-0\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.094078 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjx8d\" (UniqueName: \"kubernetes.io/projected/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-kube-api-access-zjx8d\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.094093 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5141f29f-7b8b-493c-9d73-398f66ea4ab1-db-sync-config-data\") pod \"barbican-db-sync-pmjr8\" (UID: \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\") " pod="openstack/barbican-db-sync-pmjr8" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.094114 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-config-data\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.094131 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5141f29f-7b8b-493c-9d73-398f66ea4ab1-combined-ca-bundle\") pod \"barbican-db-sync-pmjr8\" (UID: \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\") " pod="openstack/barbican-db-sync-pmjr8" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.094169 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zzhd\" (UniqueName: \"kubernetes.io/projected/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-kube-api-access-5zzhd\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.094218 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-config-data\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.094234 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-config\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.094271 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-ovsdbserver-nb\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.094305 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-log-httpd\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.094697 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-log-httpd\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.094712 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-logs\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.096614 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-run-httpd\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.098696 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-scripts\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.099775 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.100348 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.101466 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-combined-ca-bundle\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.102164 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-scripts\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.112791 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-config-data\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.114631 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-config-data\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.117918 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zzhd\" (UniqueName: \"kubernetes.io/projected/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-kube-api-access-5zzhd\") pod \"ceilometer-0\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.119028 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjx8d\" (UniqueName: \"kubernetes.io/projected/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-kube-api-access-zjx8d\") pod \"placement-db-sync-gpxkh\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.170902 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-mgn9v" event={"ID":"2369115c-ae08-42b0-af64-c42191c04502","Type":"ContainerDied","Data":"b5df84b55d88403cf2b629f4b47dc681b4f9e39ad9f86bafa991315acb3bca72"} Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.171281 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b5df84b55d88403cf2b629f4b47dc681b4f9e39ad9f86bafa991315acb3bca72" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.171102 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-mgn9v" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.196162 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-combined-ca-bundle\") pod \"2369115c-ae08-42b0-af64-c42191c04502\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.196277 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vnp75\" (UniqueName: \"kubernetes.io/projected/2369115c-ae08-42b0-af64-c42191c04502-kube-api-access-vnp75\") pod \"2369115c-ae08-42b0-af64-c42191c04502\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.196365 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-db-sync-config-data\") pod \"2369115c-ae08-42b0-af64-c42191c04502\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.196462 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-config-data\") pod \"2369115c-ae08-42b0-af64-c42191c04502\" (UID: \"2369115c-ae08-42b0-af64-c42191c04502\") " Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.196678 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-dns-svc\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.196714 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-ovsdbserver-sb\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.196732 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-dns-swift-storage-0\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.196751 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5141f29f-7b8b-493c-9d73-398f66ea4ab1-db-sync-config-data\") pod \"barbican-db-sync-pmjr8\" (UID: \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\") " pod="openstack/barbican-db-sync-pmjr8" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.196795 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5141f29f-7b8b-493c-9d73-398f66ea4ab1-combined-ca-bundle\") pod \"barbican-db-sync-pmjr8\" (UID: \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\") " pod="openstack/barbican-db-sync-pmjr8" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.196870 5037 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-config\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.196904 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-ovsdbserver-nb\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.196942 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mlwm\" (UniqueName: \"kubernetes.io/projected/5141f29f-7b8b-493c-9d73-398f66ea4ab1-kube-api-access-9mlwm\") pod \"barbican-db-sync-pmjr8\" (UID: \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\") " pod="openstack/barbican-db-sync-pmjr8" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.197027 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4qfg\" (UniqueName: \"kubernetes.io/projected/dbe07020-c0c8-4d44-9bda-2683282d178d-kube-api-access-z4qfg\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.198175 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-dns-svc\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.199002 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-ovsdbserver-sb\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.199744 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-config\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.199762 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-dns-swift-storage-0\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.200533 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-ovsdbserver-nb\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.202035 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-db-sync-config-data" 
(OuterVolumeSpecName: "db-sync-config-data") pod "2369115c-ae08-42b0-af64-c42191c04502" (UID: "2369115c-ae08-42b0-af64-c42191c04502"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.204504 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5141f29f-7b8b-493c-9d73-398f66ea4ab1-combined-ca-bundle\") pod \"barbican-db-sync-pmjr8\" (UID: \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\") " pod="openstack/barbican-db-sync-pmjr8" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.208493 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5141f29f-7b8b-493c-9d73-398f66ea4ab1-db-sync-config-data\") pod \"barbican-db-sync-pmjr8\" (UID: \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\") " pod="openstack/barbican-db-sync-pmjr8" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.208950 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2369115c-ae08-42b0-af64-c42191c04502-kube-api-access-vnp75" (OuterVolumeSpecName: "kube-api-access-vnp75") pod "2369115c-ae08-42b0-af64-c42191c04502" (UID: "2369115c-ae08-42b0-af64-c42191c04502"). InnerVolumeSpecName "kube-api-access-vnp75". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.211204 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.224763 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4qfg\" (UniqueName: \"kubernetes.io/projected/dbe07020-c0c8-4d44-9bda-2683282d178d-kube-api-access-z4qfg\") pod \"dnsmasq-dns-76c8d5b9fc-gkgbg\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.226950 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mlwm\" (UniqueName: \"kubernetes.io/projected/5141f29f-7b8b-493c-9d73-398f66ea4ab1-kube-api-access-9mlwm\") pod \"barbican-db-sync-pmjr8\" (UID: \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\") " pod="openstack/barbican-db-sync-pmjr8" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.229491 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2369115c-ae08-42b0-af64-c42191c04502" (UID: "2369115c-ae08-42b0-af64-c42191c04502"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.254389 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-config-data" (OuterVolumeSpecName: "config-data") pod "2369115c-ae08-42b0-af64-c42191c04502" (UID: "2369115c-ae08-42b0-af64-c42191c04502"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.298433 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.298473 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vnp75\" (UniqueName: \"kubernetes.io/projected/2369115c-ae08-42b0-af64-c42191c04502-kube-api-access-vnp75\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.298489 5037 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.298501 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2369115c-ae08-42b0-af64-c42191c04502-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.310614 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-78lfm" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.336687 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.342770 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.365714 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.379145 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-pmjr8" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.442584 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8c7bdb785-dx2kh"] Nov 26 14:40:06 crc kubenswrapper[5037]: W1126 14:40:06.550570 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0421669a_d794_43a0_81c7_8d5ee04afa07.slice/crio-7b95faaabf749ca6992873bdf9d2eb5bfeb3d08f68187004a2c98a2be4c700fc WatchSource:0}: Error finding container 7b95faaabf749ca6992873bdf9d2eb5bfeb3d08f68187004a2c98a2be4c700fc: Status 404 returned error can't find the container with id 7b95faaabf749ca6992873bdf9d2eb5bfeb3d08f68187004a2c98a2be4c700fc Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.555230 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-zw224"] Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.654094 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76c8d5b9fc-gkgbg"] Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.705109 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-798745f775-n9xtc"] Nov 26 14:40:06 crc kubenswrapper[5037]: E1126 14:40:06.711908 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2369115c-ae08-42b0-af64-c42191c04502" containerName="glance-db-sync" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.711943 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="2369115c-ae08-42b0-af64-c42191c04502" containerName="glance-db-sync" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.712221 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="2369115c-ae08-42b0-af64-c42191c04502" containerName="glance-db-sync" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.713198 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.776347 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-798745f775-n9xtc"] Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.826357 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-ovsdbserver-sb\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.826751 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-dns-swift-storage-0\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.826820 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-dns-svc\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.826872 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfz6m\" (UniqueName: \"kubernetes.io/projected/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-kube-api-access-kfz6m\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.826961 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-ovsdbserver-nb\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.826993 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-config\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.931871 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-dns-swift-storage-0\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.931948 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-dns-svc\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.932001 5037 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-kfz6m\" (UniqueName: \"kubernetes.io/projected/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-kube-api-access-kfz6m\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.932078 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-ovsdbserver-nb\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.932104 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-config\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.932155 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-ovsdbserver-sb\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.934030 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-dns-swift-storage-0\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.935676 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-ovsdbserver-nb\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.937511 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-ovsdbserver-sb\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.937769 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-config\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.940564 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-dns-svc\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.957121 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfz6m\" (UniqueName: 
\"kubernetes.io/projected/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-kube-api-access-kfz6m\") pod \"dnsmasq-dns-798745f775-n9xtc\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:06 crc kubenswrapper[5037]: I1126 14:40:06.962786 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.067927 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-jxxp2"] Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.184689 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zw224" event={"ID":"7c6fa03a-4f28-4823-a618-9fe4ad3925c9","Type":"ContainerStarted","Data":"d295bd5980be12914a1113b499bc78a5ddf224e91ffdae94fa34981fed438d54"} Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.186003 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jxxp2" event={"ID":"50b1873a-43ee-426d-99f2-84e8267cb178","Type":"ContainerStarted","Data":"21dbbaa10727f20e523901b7474c1b27fdf7c8abd4e5a9c9cda87d97bb2eb29c"} Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.189108 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" event={"ID":"0421669a-d794-43a0-81c7-8d5ee04afa07","Type":"ContainerStarted","Data":"7b95faaabf749ca6992873bdf9d2eb5bfeb3d08f68187004a2c98a2be4c700fc"} Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.293857 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-78lfm"] Nov 26 14:40:07 crc kubenswrapper[5037]: W1126 14:40:07.297841 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78b7adc8_c410_4ccf_948a_0d968e60d8b7.slice/crio-a142a543ef1470e50dbf56002053c49af3bc0ad8110f56bca7196341b4c7b2bb WatchSource:0}: Error finding container a142a543ef1470e50dbf56002053c49af3bc0ad8110f56bca7196341b4c7b2bb: Status 404 returned error can't find the container with id a142a543ef1470e50dbf56002053c49af3bc0ad8110f56bca7196341b4c7b2bb Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.413540 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.442028 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-gpxkh"] Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.479495 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.481054 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.487202 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-6flkn" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.487775 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.490469 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.495888 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.556344 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-pmjr8"] Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.566137 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76c8d5b9fc-gkgbg"] Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.663216 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c8c3449b-4e23-4eab-99f2-0a4df9052607-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.663307 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-scripts\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.663396 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-config-data\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.663483 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wg7nj\" (UniqueName: \"kubernetes.io/projected/c8c3449b-4e23-4eab-99f2-0a4df9052607-kube-api-access-wg7nj\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.663527 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8c3449b-4e23-4eab-99f2-0a4df9052607-logs\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.663564 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 
14:40:07.663730 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.668947 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-798745f775-n9xtc"] Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.764892 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8c3449b-4e23-4eab-99f2-0a4df9052607-logs\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.764940 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.764974 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.765014 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c8c3449b-4e23-4eab-99f2-0a4df9052607-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.765061 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-scripts\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.765086 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-config-data\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.765111 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wg7nj\" (UniqueName: \"kubernetes.io/projected/c8c3449b-4e23-4eab-99f2-0a4df9052607-kube-api-access-wg7nj\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.765264 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8c3449b-4e23-4eab-99f2-0a4df9052607-logs\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 
14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.765411 5037 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.765534 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c8c3449b-4e23-4eab-99f2-0a4df9052607-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.770232 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-config-data\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.771969 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.780053 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-scripts\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.781723 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wg7nj\" (UniqueName: \"kubernetes.io/projected/c8c3449b-4e23-4eab-99f2-0a4df9052607-kube-api-access-wg7nj\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.790532 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.837316 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.932907 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.934926 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.937639 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 14:40:07 crc kubenswrapper[5037]: I1126 14:40:07.942189 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.073519 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.073572 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkbmq\" (UniqueName: \"kubernetes.io/projected/b3270107-a4df-4c0f-841d-d018a9524d10-kube-api-access-qkbmq\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.073603 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.073647 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3270107-a4df-4c0f-841d-d018a9524d10-logs\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.073675 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.073706 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.073745 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b3270107-a4df-4c0f-841d-d018a9524d10-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.175330 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " 
pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.175608 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkbmq\" (UniqueName: \"kubernetes.io/projected/b3270107-a4df-4c0f-841d-d018a9524d10-kube-api-access-qkbmq\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.175652 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.175722 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3270107-a4df-4c0f-841d-d018a9524d10-logs\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.175747 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.175760 5037 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.176501 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3270107-a4df-4c0f-841d-d018a9524d10-logs\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.176913 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.177040 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b3270107-a4df-4c0f-841d-d018a9524d10-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.177607 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b3270107-a4df-4c0f-841d-d018a9524d10-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 
14:40:08.181549 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.183597 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.185005 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.199184 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkbmq\" (UniqueName: \"kubernetes.io/projected/b3270107-a4df-4c0f-841d-d018a9524d10-kube-api-access-qkbmq\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.202577 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pmjr8" event={"ID":"5141f29f-7b8b-493c-9d73-398f66ea4ab1","Type":"ContainerStarted","Data":"fec0b63a07134a23977bcb47b2ca874361008c6c753a0938700b4f7b6cb98776"} Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.203848 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-78lfm" event={"ID":"78b7adc8-c410-4ccf-948a-0d968e60d8b7","Type":"ContainerStarted","Data":"054c2f78fc498fecc3b64d7998923b3399f2ef4cd85d88ab4a737286fbb32ff5"} Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.203875 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-78lfm" event={"ID":"78b7adc8-c410-4ccf-948a-0d968e60d8b7","Type":"ContainerStarted","Data":"a142a543ef1470e50dbf56002053c49af3bc0ad8110f56bca7196341b4c7b2bb"} Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.212264 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gpxkh" event={"ID":"8e9b6916-0913-445b-8e5d-6a7f397dc9ba","Type":"ContainerStarted","Data":"a81060b2f94a533fd9c05f8cb34fef8288c00fe60272b528c8b99c10ad97a836"} Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.215175 5037 generic.go:334] "Generic (PLEG): container finished" podID="dbe07020-c0c8-4d44-9bda-2683282d178d" containerID="686045b96d6e81e37109f811340a86de2e0f69c639c8309c86f050c23c3d0940" exitCode=0 Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.215261 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" event={"ID":"dbe07020-c0c8-4d44-9bda-2683282d178d","Type":"ContainerDied","Data":"686045b96d6e81e37109f811340a86de2e0f69c639c8309c86f050c23c3d0940"} Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.215308 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" 
event={"ID":"dbe07020-c0c8-4d44-9bda-2683282d178d","Type":"ContainerStarted","Data":"ed2981bbd1da832e174a527d93445a17e58b105b23b89389600e189796c08740"} Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.219479 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.243072 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4","Type":"ContainerStarted","Data":"47fb53ba06afded0b1029056c6f2fa286b164e7b15a7cf592aa056b3e55459a0"} Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.252651 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zw224" event={"ID":"7c6fa03a-4f28-4823-a618-9fe4ad3925c9","Type":"ContainerStarted","Data":"fc5934dda46def196ac4d0b4a908e9d8983ca4256b62c09dea1b070f18a57463"} Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.274451 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-798745f775-n9xtc" event={"ID":"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c","Type":"ContainerStarted","Data":"8660e37195c98085384470c07716109c5286cc2e603121279dd8fadb6a194df4"} Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.276591 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-78lfm" podStartSLOduration=3.276578563 podStartE2EDuration="3.276578563s" podCreationTimestamp="2025-11-26 14:40:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:08.230814707 +0000 UTC m=+1475.027584911" watchObservedRunningTime="2025-11-26 14:40:08.276578563 +0000 UTC m=+1475.073348747" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.279766 5037 generic.go:334] "Generic (PLEG): container finished" podID="0421669a-d794-43a0-81c7-8d5ee04afa07" containerID="ee401cba135ac7870f161c7239467d693fdfe277898af8ae0d6cc4c148b38e1a" exitCode=0 Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.279807 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" event={"ID":"0421669a-d794-43a0-81c7-8d5ee04afa07","Type":"ContainerDied","Data":"ee401cba135ac7870f161c7239467d693fdfe277898af8ae0d6cc4c148b38e1a"} Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.297813 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-zw224" podStartSLOduration=3.297792712 podStartE2EDuration="3.297792712s" podCreationTimestamp="2025-11-26 14:40:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:08.280045649 +0000 UTC m=+1475.076815853" watchObservedRunningTime="2025-11-26 14:40:08.297792712 +0000 UTC m=+1475.094562906" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.304091 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.422966 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.544994 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.580061 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.682629 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.773471 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.796298 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4qfg\" (UniqueName: \"kubernetes.io/projected/dbe07020-c0c8-4d44-9bda-2683282d178d-kube-api-access-z4qfg\") pod \"dbe07020-c0c8-4d44-9bda-2683282d178d\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.796369 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-dns-swift-storage-0\") pod \"dbe07020-c0c8-4d44-9bda-2683282d178d\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.796409 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-ovsdbserver-nb\") pod \"dbe07020-c0c8-4d44-9bda-2683282d178d\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.796434 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-config\") pod \"dbe07020-c0c8-4d44-9bda-2683282d178d\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.796458 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-ovsdbserver-sb\") pod \"dbe07020-c0c8-4d44-9bda-2683282d178d\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.796480 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-dns-svc\") pod \"dbe07020-c0c8-4d44-9bda-2683282d178d\" (UID: \"dbe07020-c0c8-4d44-9bda-2683282d178d\") " Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.806911 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbe07020-c0c8-4d44-9bda-2683282d178d-kube-api-access-z4qfg" (OuterVolumeSpecName: "kube-api-access-z4qfg") pod "dbe07020-c0c8-4d44-9bda-2683282d178d" (UID: "dbe07020-c0c8-4d44-9bda-2683282d178d"). InnerVolumeSpecName "kube-api-access-z4qfg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.831102 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dbe07020-c0c8-4d44-9bda-2683282d178d" (UID: "dbe07020-c0c8-4d44-9bda-2683282d178d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.857840 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dbe07020-c0c8-4d44-9bda-2683282d178d" (UID: "dbe07020-c0c8-4d44-9bda-2683282d178d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.869681 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dbe07020-c0c8-4d44-9bda-2683282d178d" (UID: "dbe07020-c0c8-4d44-9bda-2683282d178d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.876331 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "dbe07020-c0c8-4d44-9bda-2683282d178d" (UID: "dbe07020-c0c8-4d44-9bda-2683282d178d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.895045 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-config" (OuterVolumeSpecName: "config") pod "dbe07020-c0c8-4d44-9bda-2683282d178d" (UID: "dbe07020-c0c8-4d44-9bda-2683282d178d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.898661 5037 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.898696 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.898709 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.898721 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.898738 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbe07020-c0c8-4d44-9bda-2683282d178d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:08 crc kubenswrapper[5037]: I1126 14:40:08.898775 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4qfg\" (UniqueName: \"kubernetes.io/projected/dbe07020-c0c8-4d44-9bda-2683282d178d-kube-api-access-z4qfg\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.001730 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.102086 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-dns-svc\") pod \"0421669a-d794-43a0-81c7-8d5ee04afa07\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.102182 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bpj4n\" (UniqueName: \"kubernetes.io/projected/0421669a-d794-43a0-81c7-8d5ee04afa07-kube-api-access-bpj4n\") pod \"0421669a-d794-43a0-81c7-8d5ee04afa07\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.102257 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-ovsdbserver-sb\") pod \"0421669a-d794-43a0-81c7-8d5ee04afa07\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.102367 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-config\") pod \"0421669a-d794-43a0-81c7-8d5ee04afa07\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.102418 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-dns-swift-storage-0\") pod 
\"0421669a-d794-43a0-81c7-8d5ee04afa07\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.102457 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-ovsdbserver-nb\") pod \"0421669a-d794-43a0-81c7-8d5ee04afa07\" (UID: \"0421669a-d794-43a0-81c7-8d5ee04afa07\") " Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.108110 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0421669a-d794-43a0-81c7-8d5ee04afa07-kube-api-access-bpj4n" (OuterVolumeSpecName: "kube-api-access-bpj4n") pod "0421669a-d794-43a0-81c7-8d5ee04afa07" (UID: "0421669a-d794-43a0-81c7-8d5ee04afa07"). InnerVolumeSpecName "kube-api-access-bpj4n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.133576 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0421669a-d794-43a0-81c7-8d5ee04afa07" (UID: "0421669a-d794-43a0-81c7-8d5ee04afa07"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.145932 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0421669a-d794-43a0-81c7-8d5ee04afa07" (UID: "0421669a-d794-43a0-81c7-8d5ee04afa07"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.149337 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-config" (OuterVolumeSpecName: "config") pod "0421669a-d794-43a0-81c7-8d5ee04afa07" (UID: "0421669a-d794-43a0-81c7-8d5ee04afa07"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.150932 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0421669a-d794-43a0-81c7-8d5ee04afa07" (UID: "0421669a-d794-43a0-81c7-8d5ee04afa07"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.166460 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0421669a-d794-43a0-81c7-8d5ee04afa07" (UID: "0421669a-d794-43a0-81c7-8d5ee04afa07"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.204744 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bpj4n\" (UniqueName: \"kubernetes.io/projected/0421669a-d794-43a0-81c7-8d5ee04afa07-kube-api-access-bpj4n\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.204779 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.204789 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.204797 5037 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.204805 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.204813 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0421669a-d794-43a0-81c7-8d5ee04afa07-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.225540 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.327971 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b3270107-a4df-4c0f-841d-d018a9524d10","Type":"ContainerStarted","Data":"7376077ee943de71e685f0cde5498aeed59232b67bfc120aee0ba95dfe983203"} Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.339242 5037 generic.go:334] "Generic (PLEG): container finished" podID="de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" containerID="6f9c14f89c8dfdcd45902080be403b8011e5c14a7ba8450991d55d4589fc0109" exitCode=0 Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.339368 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-798745f775-n9xtc" event={"ID":"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c","Type":"ContainerDied","Data":"6f9c14f89c8dfdcd45902080be403b8011e5c14a7ba8450991d55d4589fc0109"} Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.347025 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" event={"ID":"0421669a-d794-43a0-81c7-8d5ee04afa07","Type":"ContainerDied","Data":"7b95faaabf749ca6992873bdf9d2eb5bfeb3d08f68187004a2c98a2be4c700fc"} Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.347067 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8c7bdb785-dx2kh" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.347074 5037 scope.go:117] "RemoveContainer" containerID="ee401cba135ac7870f161c7239467d693fdfe277898af8ae0d6cc4c148b38e1a" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.349609 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c8c3449b-4e23-4eab-99f2-0a4df9052607","Type":"ContainerStarted","Data":"df07d97da9462390f4ea235e2598be1b3bf36396152c42b8d8d9b3814c09e292"} Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.362631 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.367482 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76c8d5b9fc-gkgbg" event={"ID":"dbe07020-c0c8-4d44-9bda-2683282d178d","Type":"ContainerDied","Data":"ed2981bbd1da832e174a527d93445a17e58b105b23b89389600e189796c08740"} Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.490161 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8c7bdb785-dx2kh"] Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.512542 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8c7bdb785-dx2kh"] Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.521544 5037 scope.go:117] "RemoveContainer" containerID="686045b96d6e81e37109f811340a86de2e0f69c639c8309c86f050c23c3d0940" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.532059 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76c8d5b9fc-gkgbg"] Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.541264 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-76c8d5b9fc-gkgbg"] Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.931329 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0421669a-d794-43a0-81c7-8d5ee04afa07" path="/var/lib/kubelet/pods/0421669a-d794-43a0-81c7-8d5ee04afa07/volumes" Nov 26 14:40:09 crc kubenswrapper[5037]: I1126 14:40:09.932166 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbe07020-c0c8-4d44-9bda-2683282d178d" path="/var/lib/kubelet/pods/dbe07020-c0c8-4d44-9bda-2683282d178d/volumes" Nov 26 14:40:10 crc kubenswrapper[5037]: I1126 14:40:10.388476 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c8c3449b-4e23-4eab-99f2-0a4df9052607","Type":"ContainerStarted","Data":"b73b6978197e7de434c0df717899b2ea37cd73872e1c06237b58da5ceb670968"} Nov 26 14:40:10 crc kubenswrapper[5037]: I1126 14:40:10.393869 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b3270107-a4df-4c0f-841d-d018a9524d10","Type":"ContainerStarted","Data":"ffe296b94cfba8276646a49a18ded36be71fa073bd559a6ef9cbcb2f80fc5577"} Nov 26 14:40:10 crc kubenswrapper[5037]: I1126 14:40:10.396461 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-798745f775-n9xtc" event={"ID":"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c","Type":"ContainerStarted","Data":"5507a7b9e665ee05018d8b0104c709a284e64f53e42616c429a808983a03f6e0"} Nov 26 14:40:10 crc kubenswrapper[5037]: I1126 14:40:10.397695 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:10 crc 
kubenswrapper[5037]: I1126 14:40:10.416277 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-798745f775-n9xtc" podStartSLOduration=4.41626215 podStartE2EDuration="4.41626215s" podCreationTimestamp="2025-11-26 14:40:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:10.41382067 +0000 UTC m=+1477.210590864" watchObservedRunningTime="2025-11-26 14:40:10.41626215 +0000 UTC m=+1477.213032334" Nov 26 14:40:11 crc kubenswrapper[5037]: I1126 14:40:11.246945 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:40:11 crc kubenswrapper[5037]: I1126 14:40:11.247349 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:40:11 crc kubenswrapper[5037]: I1126 14:40:11.419369 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c8c3449b-4e23-4eab-99f2-0a4df9052607" containerName="glance-log" containerID="cri-o://b73b6978197e7de434c0df717899b2ea37cd73872e1c06237b58da5ceb670968" gracePeriod=30 Nov 26 14:40:11 crc kubenswrapper[5037]: I1126 14:40:11.420241 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c8c3449b-4e23-4eab-99f2-0a4df9052607","Type":"ContainerStarted","Data":"467a87c298e44b15b1b37067552acf8a7601259ce4cd78345fc83c465787b7e3"} Nov 26 14:40:11 crc kubenswrapper[5037]: I1126 14:40:11.420834 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c8c3449b-4e23-4eab-99f2-0a4df9052607" containerName="glance-httpd" containerID="cri-o://467a87c298e44b15b1b37067552acf8a7601259ce4cd78345fc83c465787b7e3" gracePeriod=30 Nov 26 14:40:11 crc kubenswrapper[5037]: I1126 14:40:11.437419 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b3270107-a4df-4c0f-841d-d018a9524d10","Type":"ContainerStarted","Data":"b3d55c036a689e9a958deafe8ee45b7889921d64a41745461e71ace5c6d204b8"} Nov 26 14:40:11 crc kubenswrapper[5037]: I1126 14:40:11.437723 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="b3270107-a4df-4c0f-841d-d018a9524d10" containerName="glance-log" containerID="cri-o://ffe296b94cfba8276646a49a18ded36be71fa073bd559a6ef9cbcb2f80fc5577" gracePeriod=30 Nov 26 14:40:11 crc kubenswrapper[5037]: I1126 14:40:11.437787 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="b3270107-a4df-4c0f-841d-d018a9524d10" containerName="glance-httpd" containerID="cri-o://b3d55c036a689e9a958deafe8ee45b7889921d64a41745461e71ace5c6d204b8" gracePeriod=30 Nov 26 14:40:11 crc kubenswrapper[5037]: I1126 14:40:11.464145 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" 
podStartSLOduration=5.464122276 podStartE2EDuration="5.464122276s" podCreationTimestamp="2025-11-26 14:40:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:11.446496215 +0000 UTC m=+1478.243266409" watchObservedRunningTime="2025-11-26 14:40:11.464122276 +0000 UTC m=+1478.260892480" Nov 26 14:40:11 crc kubenswrapper[5037]: I1126 14:40:11.487754 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.487732372 podStartE2EDuration="5.487732372s" podCreationTimestamp="2025-11-26 14:40:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:11.480873735 +0000 UTC m=+1478.277643909" watchObservedRunningTime="2025-11-26 14:40:11.487732372 +0000 UTC m=+1478.284502566" Nov 26 14:40:12 crc kubenswrapper[5037]: I1126 14:40:12.456696 5037 generic.go:334] "Generic (PLEG): container finished" podID="c8c3449b-4e23-4eab-99f2-0a4df9052607" containerID="467a87c298e44b15b1b37067552acf8a7601259ce4cd78345fc83c465787b7e3" exitCode=143 Nov 26 14:40:12 crc kubenswrapper[5037]: I1126 14:40:12.457010 5037 generic.go:334] "Generic (PLEG): container finished" podID="c8c3449b-4e23-4eab-99f2-0a4df9052607" containerID="b73b6978197e7de434c0df717899b2ea37cd73872e1c06237b58da5ceb670968" exitCode=143 Nov 26 14:40:12 crc kubenswrapper[5037]: I1126 14:40:12.456812 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c8c3449b-4e23-4eab-99f2-0a4df9052607","Type":"ContainerDied","Data":"467a87c298e44b15b1b37067552acf8a7601259ce4cd78345fc83c465787b7e3"} Nov 26 14:40:12 crc kubenswrapper[5037]: I1126 14:40:12.457126 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c8c3449b-4e23-4eab-99f2-0a4df9052607","Type":"ContainerDied","Data":"b73b6978197e7de434c0df717899b2ea37cd73872e1c06237b58da5ceb670968"} Nov 26 14:40:12 crc kubenswrapper[5037]: I1126 14:40:12.463756 5037 generic.go:334] "Generic (PLEG): container finished" podID="b3270107-a4df-4c0f-841d-d018a9524d10" containerID="b3d55c036a689e9a958deafe8ee45b7889921d64a41745461e71ace5c6d204b8" exitCode=143 Nov 26 14:40:12 crc kubenswrapper[5037]: I1126 14:40:12.463786 5037 generic.go:334] "Generic (PLEG): container finished" podID="b3270107-a4df-4c0f-841d-d018a9524d10" containerID="ffe296b94cfba8276646a49a18ded36be71fa073bd559a6ef9cbcb2f80fc5577" exitCode=143 Nov 26 14:40:12 crc kubenswrapper[5037]: I1126 14:40:12.464330 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b3270107-a4df-4c0f-841d-d018a9524d10","Type":"ContainerDied","Data":"b3d55c036a689e9a958deafe8ee45b7889921d64a41745461e71ace5c6d204b8"} Nov 26 14:40:12 crc kubenswrapper[5037]: I1126 14:40:12.464359 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b3270107-a4df-4c0f-841d-d018a9524d10","Type":"ContainerDied","Data":"ffe296b94cfba8276646a49a18ded36be71fa073bd559a6ef9cbcb2f80fc5577"} Nov 26 14:40:13 crc kubenswrapper[5037]: I1126 14:40:13.476666 5037 generic.go:334] "Generic (PLEG): container finished" podID="7c6fa03a-4f28-4823-a618-9fe4ad3925c9" containerID="fc5934dda46def196ac4d0b4a908e9d8983ca4256b62c09dea1b070f18a57463" exitCode=0 Nov 26 14:40:13 crc kubenswrapper[5037]: I1126 
14:40:13.476709 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zw224" event={"ID":"7c6fa03a-4f28-4823-a618-9fe4ad3925c9","Type":"ContainerDied","Data":"fc5934dda46def196ac4d0b4a908e9d8983ca4256b62c09dea1b070f18a57463"} Nov 26 14:40:16 crc kubenswrapper[5037]: I1126 14:40:16.964609 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:40:17 crc kubenswrapper[5037]: I1126 14:40:17.063581 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86c887b9fc-fhqq9"] Nov 26 14:40:17 crc kubenswrapper[5037]: I1126 14:40:17.064741 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" podUID="c84209aa-144f-4082-88b0-c83eb7e57f24" containerName="dnsmasq-dns" containerID="cri-o://8889cdc2d76061e9766b819c8df825c58beba2f2de8eb27333ea5d2840bb576f" gracePeriod=10 Nov 26 14:40:18 crc kubenswrapper[5037]: I1126 14:40:18.523817 5037 generic.go:334] "Generic (PLEG): container finished" podID="c84209aa-144f-4082-88b0-c83eb7e57f24" containerID="8889cdc2d76061e9766b819c8df825c58beba2f2de8eb27333ea5d2840bb576f" exitCode=0 Nov 26 14:40:18 crc kubenswrapper[5037]: I1126 14:40:18.524509 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" event={"ID":"c84209aa-144f-4082-88b0-c83eb7e57f24","Type":"ContainerDied","Data":"8889cdc2d76061e9766b819c8df825c58beba2f2de8eb27333ea5d2840bb576f"} Nov 26 14:40:19 crc kubenswrapper[5037]: I1126 14:40:19.543158 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" podUID="c84209aa-144f-4082-88b0-c83eb7e57f24" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Nov 26 14:40:21 crc kubenswrapper[5037]: E1126 14:40:21.797959 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:d375d370be5ead0dac71109af644849e5795f535f9ad8eeacea261d77ae6f140" Nov 26 14:40:21 crc kubenswrapper[5037]: E1126 14:40:21.798661 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:d375d370be5ead0dac71109af644849e5795f535f9ad8eeacea261d77ae6f140,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n67bh58bh5c5h56h54dhffh55h58dh575h56dh596h674h5d6h687h5b5h79hcch85h68fh56h5f5h555hf7h84hcbh5b5h5bfh59fh57h5cfh685h77q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5zzhd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(64dabc01-39ed-4b48-a2d4-4ca7b3070cc4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 14:40:24 crc kubenswrapper[5037]: I1126 14:40:24.542903 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" podUID="c84209aa-144f-4082-88b0-c83eb7e57f24" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Nov 26 14:40:28 crc kubenswrapper[5037]: I1126 14:40:28.989658 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.005585 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.008188 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132192 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"c8c3449b-4e23-4eab-99f2-0a4df9052607\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132255 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-combined-ca-bundle\") pod \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132296 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-config-data\") pod \"c8c3449b-4e23-4eab-99f2-0a4df9052607\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132368 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3270107-a4df-4c0f-841d-d018a9524d10-logs\") pod \"b3270107-a4df-4c0f-841d-d018a9524d10\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132396 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkbmq\" (UniqueName: \"kubernetes.io/projected/b3270107-a4df-4c0f-841d-d018a9524d10-kube-api-access-qkbmq\") pod \"b3270107-a4df-4c0f-841d-d018a9524d10\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132427 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c8c3449b-4e23-4eab-99f2-0a4df9052607-httpd-run\") pod \"c8c3449b-4e23-4eab-99f2-0a4df9052607\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132462 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wg7nj\" (UniqueName: \"kubernetes.io/projected/c8c3449b-4e23-4eab-99f2-0a4df9052607-kube-api-access-wg7nj\") pod \"c8c3449b-4e23-4eab-99f2-0a4df9052607\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132545 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-scripts\") pod \"c8c3449b-4e23-4eab-99f2-0a4df9052607\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132570 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-combined-ca-bundle\") pod \"b3270107-a4df-4c0f-841d-d018a9524d10\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132602 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b3270107-a4df-4c0f-841d-d018a9524d10-httpd-run\") pod \"b3270107-a4df-4c0f-841d-d018a9524d10\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " Nov 26 
14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132633 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-config-data\") pod \"b3270107-a4df-4c0f-841d-d018a9524d10\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132670 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-credential-keys\") pod \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132700 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-scripts\") pod \"b3270107-a4df-4c0f-841d-d018a9524d10\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132722 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-config-data\") pod \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132745 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-fernet-keys\") pod \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132778 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vwlwv\" (UniqueName: \"kubernetes.io/projected/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-kube-api-access-vwlwv\") pod \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132803 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8c3449b-4e23-4eab-99f2-0a4df9052607-logs\") pod \"c8c3449b-4e23-4eab-99f2-0a4df9052607\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132842 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"b3270107-a4df-4c0f-841d-d018a9524d10\" (UID: \"b3270107-a4df-4c0f-841d-d018a9524d10\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132889 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-scripts\") pod \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\" (UID: \"7c6fa03a-4f28-4823-a618-9fe4ad3925c9\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.132916 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-combined-ca-bundle\") pod \"c8c3449b-4e23-4eab-99f2-0a4df9052607\" (UID: \"c8c3449b-4e23-4eab-99f2-0a4df9052607\") " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.133098 5037 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/empty-dir/c8c3449b-4e23-4eab-99f2-0a4df9052607-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c8c3449b-4e23-4eab-99f2-0a4df9052607" (UID: "c8c3449b-4e23-4eab-99f2-0a4df9052607"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.133363 5037 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c8c3449b-4e23-4eab-99f2-0a4df9052607-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.133412 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3270107-a4df-4c0f-841d-d018a9524d10-logs" (OuterVolumeSpecName: "logs") pod "b3270107-a4df-4c0f-841d-d018a9524d10" (UID: "b3270107-a4df-4c0f-841d-d018a9524d10"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.133439 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3270107-a4df-4c0f-841d-d018a9524d10-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b3270107-a4df-4c0f-841d-d018a9524d10" (UID: "b3270107-a4df-4c0f-841d-d018a9524d10"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.133727 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8c3449b-4e23-4eab-99f2-0a4df9052607-logs" (OuterVolumeSpecName: "logs") pod "c8c3449b-4e23-4eab-99f2-0a4df9052607" (UID: "c8c3449b-4e23-4eab-99f2-0a4df9052607"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.139647 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-kube-api-access-vwlwv" (OuterVolumeSpecName: "kube-api-access-vwlwv") pod "7c6fa03a-4f28-4823-a618-9fe4ad3925c9" (UID: "7c6fa03a-4f28-4823-a618-9fe4ad3925c9"). InnerVolumeSpecName "kube-api-access-vwlwv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.145079 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3270107-a4df-4c0f-841d-d018a9524d10-kube-api-access-qkbmq" (OuterVolumeSpecName: "kube-api-access-qkbmq") pod "b3270107-a4df-4c0f-841d-d018a9524d10" (UID: "b3270107-a4df-4c0f-841d-d018a9524d10"). InnerVolumeSpecName "kube-api-access-qkbmq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.145187 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8c3449b-4e23-4eab-99f2-0a4df9052607-kube-api-access-wg7nj" (OuterVolumeSpecName: "kube-api-access-wg7nj") pod "c8c3449b-4e23-4eab-99f2-0a4df9052607" (UID: "c8c3449b-4e23-4eab-99f2-0a4df9052607"). InnerVolumeSpecName "kube-api-access-wg7nj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.149609 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-scripts" (OuterVolumeSpecName: "scripts") pod "c8c3449b-4e23-4eab-99f2-0a4df9052607" (UID: "c8c3449b-4e23-4eab-99f2-0a4df9052607"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.149609 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "c8c3449b-4e23-4eab-99f2-0a4df9052607" (UID: "c8c3449b-4e23-4eab-99f2-0a4df9052607"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.149697 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-scripts" (OuterVolumeSpecName: "scripts") pod "b3270107-a4df-4c0f-841d-d018a9524d10" (UID: "b3270107-a4df-4c0f-841d-d018a9524d10"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.151108 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-scripts" (OuterVolumeSpecName: "scripts") pod "7c6fa03a-4f28-4823-a618-9fe4ad3925c9" (UID: "7c6fa03a-4f28-4823-a618-9fe4ad3925c9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.152777 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "b3270107-a4df-4c0f-841d-d018a9524d10" (UID: "b3270107-a4df-4c0f-841d-d018a9524d10"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.154570 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "7c6fa03a-4f28-4823-a618-9fe4ad3925c9" (UID: "7c6fa03a-4f28-4823-a618-9fe4ad3925c9"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.171226 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7c6fa03a-4f28-4823-a618-9fe4ad3925c9" (UID: "7c6fa03a-4f28-4823-a618-9fe4ad3925c9"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.171895 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c8c3449b-4e23-4eab-99f2-0a4df9052607" (UID: "c8c3449b-4e23-4eab-99f2-0a4df9052607"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.173630 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-config-data" (OuterVolumeSpecName: "config-data") pod "7c6fa03a-4f28-4823-a618-9fe4ad3925c9" (UID: "7c6fa03a-4f28-4823-a618-9fe4ad3925c9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.175498 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c6fa03a-4f28-4823-a618-9fe4ad3925c9" (UID: "7c6fa03a-4f28-4823-a618-9fe4ad3925c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.194494 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b3270107-a4df-4c0f-841d-d018a9524d10" (UID: "b3270107-a4df-4c0f-841d-d018a9524d10"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.205569 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-config-data" (OuterVolumeSpecName: "config-data") pod "c8c3449b-4e23-4eab-99f2-0a4df9052607" (UID: "c8c3449b-4e23-4eab-99f2-0a4df9052607"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.213914 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-config-data" (OuterVolumeSpecName: "config-data") pod "b3270107-a4df-4c0f-841d-d018a9524d10" (UID: "b3270107-a4df-4c0f-841d-d018a9524d10"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235671 5037 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235728 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235740 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235750 5037 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235763 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vwlwv\" (UniqueName: \"kubernetes.io/projected/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-kube-api-access-vwlwv\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235778 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8c3449b-4e23-4eab-99f2-0a4df9052607-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235825 5037 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235837 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235848 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235865 5037 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235877 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c6fa03a-4f28-4823-a618-9fe4ad3925c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235888 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235897 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3270107-a4df-4c0f-841d-d018a9524d10-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235908 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkbmq\" (UniqueName: \"kubernetes.io/projected/b3270107-a4df-4c0f-841d-d018a9524d10-kube-api-access-qkbmq\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235918 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wg7nj\" (UniqueName: \"kubernetes.io/projected/c8c3449b-4e23-4eab-99f2-0a4df9052607-kube-api-access-wg7nj\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235928 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8c3449b-4e23-4eab-99f2-0a4df9052607-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235955 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235967 5037 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b3270107-a4df-4c0f-841d-d018a9524d10-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.235978 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3270107-a4df-4c0f-841d-d018a9524d10-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.256946 5037 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.259907 5037 
operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.339292 5037 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.339397 5037 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.624139 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zw224" event={"ID":"7c6fa03a-4f28-4823-a618-9fe4ad3925c9","Type":"ContainerDied","Data":"d295bd5980be12914a1113b499bc78a5ddf224e91ffdae94fa34981fed438d54"} Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.624180 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d295bd5980be12914a1113b499bc78a5ddf224e91ffdae94fa34981fed438d54" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.624247 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-zw224" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.629547 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b3270107-a4df-4c0f-841d-d018a9524d10","Type":"ContainerDied","Data":"7376077ee943de71e685f0cde5498aeed59232b67bfc120aee0ba95dfe983203"} Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.629622 5037 scope.go:117] "RemoveContainer" containerID="b3d55c036a689e9a958deafe8ee45b7889921d64a41745461e71ace5c6d204b8" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.629825 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.640346 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c8c3449b-4e23-4eab-99f2-0a4df9052607","Type":"ContainerDied","Data":"df07d97da9462390f4ea235e2598be1b3bf36396152c42b8d8d9b3814c09e292"} Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.640418 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.688914 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.714449 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.742245 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.753806 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761201 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:40:29 crc kubenswrapper[5037]: E1126 14:40:29.761579 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8c3449b-4e23-4eab-99f2-0a4df9052607" containerName="glance-log" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761598 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8c3449b-4e23-4eab-99f2-0a4df9052607" containerName="glance-log" Nov 26 14:40:29 crc kubenswrapper[5037]: E1126 14:40:29.761613 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe07020-c0c8-4d44-9bda-2683282d178d" containerName="init" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761619 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe07020-c0c8-4d44-9bda-2683282d178d" containerName="init" Nov 26 14:40:29 crc kubenswrapper[5037]: E1126 14:40:29.761634 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8c3449b-4e23-4eab-99f2-0a4df9052607" containerName="glance-httpd" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761640 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8c3449b-4e23-4eab-99f2-0a4df9052607" containerName="glance-httpd" Nov 26 14:40:29 crc kubenswrapper[5037]: E1126 14:40:29.761653 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c6fa03a-4f28-4823-a618-9fe4ad3925c9" containerName="keystone-bootstrap" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761660 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c6fa03a-4f28-4823-a618-9fe4ad3925c9" containerName="keystone-bootstrap" Nov 26 14:40:29 crc kubenswrapper[5037]: E1126 14:40:29.761672 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3270107-a4df-4c0f-841d-d018a9524d10" containerName="glance-log" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761677 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3270107-a4df-4c0f-841d-d018a9524d10" containerName="glance-log" Nov 26 14:40:29 crc kubenswrapper[5037]: E1126 14:40:29.761688 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3270107-a4df-4c0f-841d-d018a9524d10" containerName="glance-httpd" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761694 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3270107-a4df-4c0f-841d-d018a9524d10" containerName="glance-httpd" Nov 26 14:40:29 crc kubenswrapper[5037]: E1126 14:40:29.761708 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0421669a-d794-43a0-81c7-8d5ee04afa07" containerName="init" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761714 5037 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="0421669a-d794-43a0-81c7-8d5ee04afa07" containerName="init" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761882 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3270107-a4df-4c0f-841d-d018a9524d10" containerName="glance-httpd" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761898 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c6fa03a-4f28-4823-a618-9fe4ad3925c9" containerName="keystone-bootstrap" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761909 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8c3449b-4e23-4eab-99f2-0a4df9052607" containerName="glance-httpd" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761920 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbe07020-c0c8-4d44-9bda-2683282d178d" containerName="init" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761930 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="0421669a-d794-43a0-81c7-8d5ee04afa07" containerName="init" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761941 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3270107-a4df-4c0f-841d-d018a9524d10" containerName="glance-log" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.761949 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8c3449b-4e23-4eab-99f2-0a4df9052607" containerName="glance-log" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.762894 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.765812 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-6flkn" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.765996 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.766235 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.766581 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.769126 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.770924 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.774361 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.774525 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.776406 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.800578 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.937908 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3270107-a4df-4c0f-841d-d018a9524d10" path="/var/lib/kubelet/pods/b3270107-a4df-4c0f-841d-d018a9524d10/volumes" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.938730 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8c3449b-4e23-4eab-99f2-0a4df9052607" path="/var/lib/kubelet/pods/c8c3449b-4e23-4eab-99f2-0a4df9052607/volumes" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.952628 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-scripts\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.952700 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/217b743f-dd2d-4fac-b61e-4ecd43e540d4-logs\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.952804 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-config-data\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.952858 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-logs\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.952887 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.952934 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-httpd-run\") pod \"glance-default-external-api-0\" (UID: 
\"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.953118 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.953186 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/217b743f-dd2d-4fac-b61e-4ecd43e540d4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.953365 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.953444 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.953501 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgjxw\" (UniqueName: \"kubernetes.io/projected/217b743f-dd2d-4fac-b61e-4ecd43e540d4-kube-api-access-rgjxw\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.953566 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.953617 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gslp\" (UniqueName: \"kubernetes.io/projected/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-kube-api-access-7gslp\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.953693 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.953755 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:29 crc kubenswrapper[5037]: I1126 14:40:29.953822 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.057715 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-config-data\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.058121 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-logs\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.058589 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-logs\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.059666 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.060577 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.060638 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.063010 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.063130 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/217b743f-dd2d-4fac-b61e-4ecd43e540d4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: 
\"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.063176 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.063201 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.063251 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgjxw\" (UniqueName: \"kubernetes.io/projected/217b743f-dd2d-4fac-b61e-4ecd43e540d4-kube-api-access-rgjxw\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.063343 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.063377 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gslp\" (UniqueName: \"kubernetes.io/projected/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-kube-api-access-7gslp\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.063450 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.063499 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.063549 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.063606 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-scripts\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" 
Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.063656 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/217b743f-dd2d-4fac-b61e-4ecd43e540d4-logs\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.065183 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/217b743f-dd2d-4fac-b61e-4ecd43e540d4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.066165 5037 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.069904 5037 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.075135 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/217b743f-dd2d-4fac-b61e-4ecd43e540d4-logs\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.075678 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.075984 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.080898 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-scripts\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.088943 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-config-data\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.092145 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.093066 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.096017 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.105109 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.118465 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgjxw\" (UniqueName: \"kubernetes.io/projected/217b743f-dd2d-4fac-b61e-4ecd43e540d4-kube-api-access-rgjxw\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.148370 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gslp\" (UniqueName: \"kubernetes.io/projected/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-kube-api-access-7gslp\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.154179 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.161391 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-zw224"] Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.166180 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.170121 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-zw224"] Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.231035 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-959f6"] Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.232561 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.236432 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.236629 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.236691 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-fpv7w" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.237051 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.237441 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.247122 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-959f6"] Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.367936 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-config-data\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.367983 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-combined-ca-bundle\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.368014 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-fernet-keys\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.368049 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwtmh\" (UniqueName: \"kubernetes.io/projected/c6afbf28-95dd-4597-ac5d-f3735515e1b2-kube-api-access-cwtmh\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.368399 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-scripts\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.368498 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-credential-keys\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.401885 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.431949 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.469803 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-scripts\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.469875 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-credential-keys\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.469952 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-config-data\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.469971 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-combined-ca-bundle\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.469991 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-fernet-keys\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.470008 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwtmh\" (UniqueName: \"kubernetes.io/projected/c6afbf28-95dd-4597-ac5d-f3735515e1b2-kube-api-access-cwtmh\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.475407 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-credential-keys\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.475901 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-config-data\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.476794 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-fernet-keys\") pod \"keystone-bootstrap-959f6\" (UID: 
\"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.477279 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-combined-ca-bundle\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.480795 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-scripts\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.489788 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwtmh\" (UniqueName: \"kubernetes.io/projected/c6afbf28-95dd-4597-ac5d-f3735515e1b2-kube-api-access-cwtmh\") pod \"keystone-bootstrap-959f6\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:30 crc kubenswrapper[5037]: I1126 14:40:30.568881 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:31 crc kubenswrapper[5037]: I1126 14:40:31.927608 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c6fa03a-4f28-4823-a618-9fe4ad3925c9" path="/var/lib/kubelet/pods/7c6fa03a-4f28-4823-a618-9fe4ad3925c9/volumes" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.438019 5037 scope.go:117] "RemoveContainer" containerID="ffe296b94cfba8276646a49a18ded36be71fa073bd559a6ef9cbcb2f80fc5577" Nov 26 14:40:32 crc kubenswrapper[5037]: E1126 14:40:32.447862 5037 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879" Nov 26 14:40:32 crc kubenswrapper[5037]: E1126 14:40:32.448020 5037 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jt7vt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-jxxp2_openstack(50b1873a-43ee-426d-99f2-84e8267cb178): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 14:40:32 crc kubenswrapper[5037]: E1126 14:40:32.449300 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-jxxp2" podUID="50b1873a-43ee-426d-99f2-84e8267cb178" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.526407 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.611033 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-ovsdbserver-sb\") pod \"c84209aa-144f-4082-88b0-c83eb7e57f24\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.611137 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-config\") pod \"c84209aa-144f-4082-88b0-c83eb7e57f24\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.611241 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-dns-svc\") pod \"c84209aa-144f-4082-88b0-c83eb7e57f24\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.611329 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-dns-swift-storage-0\") pod \"c84209aa-144f-4082-88b0-c83eb7e57f24\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.611366 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-ovsdbserver-nb\") pod \"c84209aa-144f-4082-88b0-c83eb7e57f24\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.611393 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jh9z\" (UniqueName: \"kubernetes.io/projected/c84209aa-144f-4082-88b0-c83eb7e57f24-kube-api-access-9jh9z\") pod \"c84209aa-144f-4082-88b0-c83eb7e57f24\" (UID: \"c84209aa-144f-4082-88b0-c83eb7e57f24\") " Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.616617 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c84209aa-144f-4082-88b0-c83eb7e57f24-kube-api-access-9jh9z" (OuterVolumeSpecName: "kube-api-access-9jh9z") pod "c84209aa-144f-4082-88b0-c83eb7e57f24" (UID: "c84209aa-144f-4082-88b0-c83eb7e57f24"). InnerVolumeSpecName "kube-api-access-9jh9z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.653769 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c84209aa-144f-4082-88b0-c83eb7e57f24" (UID: "c84209aa-144f-4082-88b0-c83eb7e57f24"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.659024 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-config" (OuterVolumeSpecName: "config") pod "c84209aa-144f-4082-88b0-c83eb7e57f24" (UID: "c84209aa-144f-4082-88b0-c83eb7e57f24"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.659181 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c84209aa-144f-4082-88b0-c83eb7e57f24" (UID: "c84209aa-144f-4082-88b0-c83eb7e57f24"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.662696 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c84209aa-144f-4082-88b0-c83eb7e57f24" (UID: "c84209aa-144f-4082-88b0-c83eb7e57f24"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.664796 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c84209aa-144f-4082-88b0-c83eb7e57f24" (UID: "c84209aa-144f-4082-88b0-c83eb7e57f24"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.674420 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" event={"ID":"c84209aa-144f-4082-88b0-c83eb7e57f24","Type":"ContainerDied","Data":"21c1bf35ae18cae5d10346a44424c5dcc70b25fd0973a12eb82aa42bf2862ac8"} Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.674450 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" Nov 26 14:40:32 crc kubenswrapper[5037]: E1126 14:40:32.678103 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879\\\"\"" pod="openstack/cinder-db-sync-jxxp2" podUID="50b1873a-43ee-426d-99f2-84e8267cb178" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.715140 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.715175 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.715191 5037 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.715202 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.715210 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jh9z\" (UniqueName: \"kubernetes.io/projected/c84209aa-144f-4082-88b0-c83eb7e57f24-kube-api-access-9jh9z\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.715220 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c84209aa-144f-4082-88b0-c83eb7e57f24-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.741201 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86c887b9fc-fhqq9"] Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.756526 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86c887b9fc-fhqq9"] Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.873893 5037 scope.go:117] "RemoveContainer" containerID="467a87c298e44b15b1b37067552acf8a7601259ce4cd78345fc83c465787b7e3" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.895331 5037 scope.go:117] "RemoveContainer" containerID="b73b6978197e7de434c0df717899b2ea37cd73872e1c06237b58da5ceb670968" Nov 26 14:40:32 crc kubenswrapper[5037]: I1126 14:40:32.949749 5037 scope.go:117] "RemoveContainer" containerID="8889cdc2d76061e9766b819c8df825c58beba2f2de8eb27333ea5d2840bb576f" Nov 26 14:40:33 crc kubenswrapper[5037]: I1126 14:40:33.116400 5037 scope.go:117] "RemoveContainer" containerID="7694b2e339bea993dbb7ba2c5050c3c43d6c58ee1c74d9584b2a39736f8cd2a1" Nov 26 14:40:33 crc kubenswrapper[5037]: I1126 14:40:33.256240 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-959f6"] Nov 26 14:40:33 crc kubenswrapper[5037]: W1126 14:40:33.258971 5037 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6afbf28_95dd_4597_ac5d_f3735515e1b2.slice/crio-019c0765a3bd2054346bfebd953ae7168971516129801f50ae1d84be7e272ab2 WatchSource:0}: Error finding container 019c0765a3bd2054346bfebd953ae7168971516129801f50ae1d84be7e272ab2: Status 404 returned error can't find the container with id 019c0765a3bd2054346bfebd953ae7168971516129801f50ae1d84be7e272ab2 Nov 26 14:40:33 crc kubenswrapper[5037]: I1126 14:40:33.697686 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4","Type":"ContainerStarted","Data":"7acd7256521600da6ac1e6813aa3e9cf78cfc3cd65ec85f50d2f45137be61eac"} Nov 26 14:40:33 crc kubenswrapper[5037]: I1126 14:40:33.702644 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pmjr8" event={"ID":"5141f29f-7b8b-493c-9d73-398f66ea4ab1","Type":"ContainerStarted","Data":"da1a1cd459be2e9979d9f0c94ca48d383fd106014f27219fe0eb565893f80f4f"} Nov 26 14:40:33 crc kubenswrapper[5037]: I1126 14:40:33.722043 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gpxkh" event={"ID":"8e9b6916-0913-445b-8e5d-6a7f397dc9ba","Type":"ContainerStarted","Data":"5ca9c40161d4eda7b873f1e18d11190b089950565570999848569efa39f20890"} Nov 26 14:40:33 crc kubenswrapper[5037]: I1126 14:40:33.728727 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:40:33 crc kubenswrapper[5037]: I1126 14:40:33.740803 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-pmjr8" podStartSLOduration=3.46568428 podStartE2EDuration="28.740782516s" podCreationTimestamp="2025-11-26 14:40:05 +0000 UTC" firstStartedPulling="2025-11-26 14:40:07.552384098 +0000 UTC m=+1474.349154282" lastFinishedPulling="2025-11-26 14:40:32.827482334 +0000 UTC m=+1499.624252518" observedRunningTime="2025-11-26 14:40:33.739429643 +0000 UTC m=+1500.536199867" watchObservedRunningTime="2025-11-26 14:40:33.740782516 +0000 UTC m=+1500.537552700" Nov 26 14:40:33 crc kubenswrapper[5037]: I1126 14:40:33.746673 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-959f6" event={"ID":"c6afbf28-95dd-4597-ac5d-f3735515e1b2","Type":"ContainerStarted","Data":"cfd4a00f08204fabb6bb9f632ea7584c7a2da1a6a0424ec85b2ab6cdf18489eb"} Nov 26 14:40:33 crc kubenswrapper[5037]: I1126 14:40:33.746720 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-959f6" event={"ID":"c6afbf28-95dd-4597-ac5d-f3735515e1b2","Type":"ContainerStarted","Data":"019c0765a3bd2054346bfebd953ae7168971516129801f50ae1d84be7e272ab2"} Nov 26 14:40:33 crc kubenswrapper[5037]: I1126 14:40:33.777795 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-gpxkh" podStartSLOduration=3.813130271 podStartE2EDuration="28.777776009s" podCreationTimestamp="2025-11-26 14:40:05 +0000 UTC" firstStartedPulling="2025-11-26 14:40:07.452240184 +0000 UTC m=+1474.249010368" lastFinishedPulling="2025-11-26 14:40:32.416885932 +0000 UTC m=+1499.213656106" observedRunningTime="2025-11-26 14:40:33.766632587 +0000 UTC m=+1500.563402771" watchObservedRunningTime="2025-11-26 14:40:33.777776009 +0000 UTC m=+1500.574546193" Nov 26 14:40:33 crc kubenswrapper[5037]: I1126 14:40:33.791638 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-959f6" 
podStartSLOduration=3.791612867 podStartE2EDuration="3.791612867s" podCreationTimestamp="2025-11-26 14:40:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:33.791608657 +0000 UTC m=+1500.588378861" watchObservedRunningTime="2025-11-26 14:40:33.791612867 +0000 UTC m=+1500.588383051" Nov 26 14:40:33 crc kubenswrapper[5037]: I1126 14:40:33.940866 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c84209aa-144f-4082-88b0-c83eb7e57f24" path="/var/lib/kubelet/pods/c84209aa-144f-4082-88b0-c83eb7e57f24/volumes" Nov 26 14:40:34 crc kubenswrapper[5037]: I1126 14:40:34.543128 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86c887b9fc-fhqq9" podUID="c84209aa-144f-4082-88b0-c83eb7e57f24" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: i/o timeout" Nov 26 14:40:34 crc kubenswrapper[5037]: I1126 14:40:34.705403 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:40:34 crc kubenswrapper[5037]: W1126 14:40:34.707943 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51577a49_9ff5_419f_ade6_6b5a9abbf7c0.slice/crio-e8ade553db88109f753c04de8b571fb16f362385cbcb57c5494ff504fdf2bb86 WatchSource:0}: Error finding container e8ade553db88109f753c04de8b571fb16f362385cbcb57c5494ff504fdf2bb86: Status 404 returned error can't find the container with id e8ade553db88109f753c04de8b571fb16f362385cbcb57c5494ff504fdf2bb86 Nov 26 14:40:34 crc kubenswrapper[5037]: I1126 14:40:34.766481 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"217b743f-dd2d-4fac-b61e-4ecd43e540d4","Type":"ContainerStarted","Data":"e45352dde5f5b9a92cfe20673f8c8ed00298f5bee8f04018fad41a90935adb7e"} Nov 26 14:40:34 crc kubenswrapper[5037]: I1126 14:40:34.766523 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"217b743f-dd2d-4fac-b61e-4ecd43e540d4","Type":"ContainerStarted","Data":"3f62c38a0803b969f4015fa7b32b1291a4cd52b39b14c4d3f6c3eba3640d6b50"} Nov 26 14:40:34 crc kubenswrapper[5037]: I1126 14:40:34.768029 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"51577a49-9ff5-419f-ade6-6b5a9abbf7c0","Type":"ContainerStarted","Data":"e8ade553db88109f753c04de8b571fb16f362385cbcb57c5494ff504fdf2bb86"} Nov 26 14:40:35 crc kubenswrapper[5037]: I1126 14:40:35.791017 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"217b743f-dd2d-4fac-b61e-4ecd43e540d4","Type":"ContainerStarted","Data":"2ade519bc8f930f63a7e98b174c9ebeb57533c95227f89b8a7c1a16fc7313c88"} Nov 26 14:40:35 crc kubenswrapper[5037]: I1126 14:40:35.793411 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"51577a49-9ff5-419f-ade6-6b5a9abbf7c0","Type":"ContainerStarted","Data":"5f860a50e8a312cb271e8663accd0b603f1a9197ed83358443cce54a067f67bc"} Nov 26 14:40:35 crc kubenswrapper[5037]: I1126 14:40:35.818686 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.818659433 podStartE2EDuration="6.818659433s" podCreationTimestamp="2025-11-26 14:40:29 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:35.813438076 +0000 UTC m=+1502.610208260" watchObservedRunningTime="2025-11-26 14:40:35.818659433 +0000 UTC m=+1502.615429617" Nov 26 14:40:36 crc kubenswrapper[5037]: I1126 14:40:36.804801 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"51577a49-9ff5-419f-ade6-6b5a9abbf7c0","Type":"ContainerStarted","Data":"979e78ffbe0bc4990e5111e8c7519d935e4c9ab922a1af6d9c17cf6aff56f82d"} Nov 26 14:40:36 crc kubenswrapper[5037]: I1126 14:40:36.837174 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.837145922 podStartE2EDuration="7.837145922s" podCreationTimestamp="2025-11-26 14:40:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:36.829073125 +0000 UTC m=+1503.625843329" watchObservedRunningTime="2025-11-26 14:40:36.837145922 +0000 UTC m=+1503.633916106" Nov 26 14:40:37 crc kubenswrapper[5037]: I1126 14:40:37.817962 5037 generic.go:334] "Generic (PLEG): container finished" podID="8e9b6916-0913-445b-8e5d-6a7f397dc9ba" containerID="5ca9c40161d4eda7b873f1e18d11190b089950565570999848569efa39f20890" exitCode=0 Nov 26 14:40:37 crc kubenswrapper[5037]: I1126 14:40:37.818087 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gpxkh" event={"ID":"8e9b6916-0913-445b-8e5d-6a7f397dc9ba","Type":"ContainerDied","Data":"5ca9c40161d4eda7b873f1e18d11190b089950565570999848569efa39f20890"} Nov 26 14:40:37 crc kubenswrapper[5037]: I1126 14:40:37.820853 5037 generic.go:334] "Generic (PLEG): container finished" podID="c6afbf28-95dd-4597-ac5d-f3735515e1b2" containerID="cfd4a00f08204fabb6bb9f632ea7584c7a2da1a6a0424ec85b2ab6cdf18489eb" exitCode=0 Nov 26 14:40:37 crc kubenswrapper[5037]: I1126 14:40:37.820889 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-959f6" event={"ID":"c6afbf28-95dd-4597-ac5d-f3735515e1b2","Type":"ContainerDied","Data":"cfd4a00f08204fabb6bb9f632ea7584c7a2da1a6a0424ec85b2ab6cdf18489eb"} Nov 26 14:40:38 crc kubenswrapper[5037]: I1126 14:40:38.843550 5037 generic.go:334] "Generic (PLEG): container finished" podID="5141f29f-7b8b-493c-9d73-398f66ea4ab1" containerID="da1a1cd459be2e9979d9f0c94ca48d383fd106014f27219fe0eb565893f80f4f" exitCode=0 Nov 26 14:40:38 crc kubenswrapper[5037]: I1126 14:40:38.846624 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pmjr8" event={"ID":"5141f29f-7b8b-493c-9d73-398f66ea4ab1","Type":"ContainerDied","Data":"da1a1cd459be2e9979d9f0c94ca48d383fd106014f27219fe0eb565893f80f4f"} Nov 26 14:40:38 crc kubenswrapper[5037]: I1126 14:40:38.852284 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4","Type":"ContainerStarted","Data":"9e5f3c5c90e9812c570ac0055351a47f991610d4575a96e8c965dcfe4537a190"} Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.258191 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.263409 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.337658 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-fernet-keys\") pod \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.337697 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-logs\") pod \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.337727 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjx8d\" (UniqueName: \"kubernetes.io/projected/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-kube-api-access-zjx8d\") pod \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.337766 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwtmh\" (UniqueName: \"kubernetes.io/projected/c6afbf28-95dd-4597-ac5d-f3735515e1b2-kube-api-access-cwtmh\") pod \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.337824 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-combined-ca-bundle\") pod \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.337848 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-scripts\") pod \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.337865 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-config-data\") pod \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.337908 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-scripts\") pod \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\" (UID: \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.337930 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-combined-ca-bundle\") pod \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.337954 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-credential-keys\") pod \"c6afbf28-95dd-4597-ac5d-f3735515e1b2\" (UID: 
\"c6afbf28-95dd-4597-ac5d-f3735515e1b2\") " Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.337973 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-config-data\") pod \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\" (UID: \"8e9b6916-0913-445b-8e5d-6a7f397dc9ba\") " Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.339112 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-logs" (OuterVolumeSpecName: "logs") pod "8e9b6916-0913-445b-8e5d-6a7f397dc9ba" (UID: "8e9b6916-0913-445b-8e5d-6a7f397dc9ba"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.343493 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-kube-api-access-zjx8d" (OuterVolumeSpecName: "kube-api-access-zjx8d") pod "8e9b6916-0913-445b-8e5d-6a7f397dc9ba" (UID: "8e9b6916-0913-445b-8e5d-6a7f397dc9ba"). InnerVolumeSpecName "kube-api-access-zjx8d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.344048 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-scripts" (OuterVolumeSpecName: "scripts") pod "8e9b6916-0913-445b-8e5d-6a7f397dc9ba" (UID: "8e9b6916-0913-445b-8e5d-6a7f397dc9ba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.344773 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-scripts" (OuterVolumeSpecName: "scripts") pod "c6afbf28-95dd-4597-ac5d-f3735515e1b2" (UID: "c6afbf28-95dd-4597-ac5d-f3735515e1b2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.344801 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "c6afbf28-95dd-4597-ac5d-f3735515e1b2" (UID: "c6afbf28-95dd-4597-ac5d-f3735515e1b2"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.344875 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "c6afbf28-95dd-4597-ac5d-f3735515e1b2" (UID: "c6afbf28-95dd-4597-ac5d-f3735515e1b2"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.345952 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6afbf28-95dd-4597-ac5d-f3735515e1b2-kube-api-access-cwtmh" (OuterVolumeSpecName: "kube-api-access-cwtmh") pod "c6afbf28-95dd-4597-ac5d-f3735515e1b2" (UID: "c6afbf28-95dd-4597-ac5d-f3735515e1b2"). InnerVolumeSpecName "kube-api-access-cwtmh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.363116 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e9b6916-0913-445b-8e5d-6a7f397dc9ba" (UID: "8e9b6916-0913-445b-8e5d-6a7f397dc9ba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.363780 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-config-data" (OuterVolumeSpecName: "config-data") pod "8e9b6916-0913-445b-8e5d-6a7f397dc9ba" (UID: "8e9b6916-0913-445b-8e5d-6a7f397dc9ba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.365132 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c6afbf28-95dd-4597-ac5d-f3735515e1b2" (UID: "c6afbf28-95dd-4597-ac5d-f3735515e1b2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.373920 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-config-data" (OuterVolumeSpecName: "config-data") pod "c6afbf28-95dd-4597-ac5d-f3735515e1b2" (UID: "c6afbf28-95dd-4597-ac5d-f3735515e1b2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.440119 5037 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.440171 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.440191 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjx8d\" (UniqueName: \"kubernetes.io/projected/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-kube-api-access-zjx8d\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.440214 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwtmh\" (UniqueName: \"kubernetes.io/projected/c6afbf28-95dd-4597-ac5d-f3735515e1b2-kube-api-access-cwtmh\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.440236 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.440259 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.440314 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.440335 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.440357 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.440379 5037 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c6afbf28-95dd-4597-ac5d-f3735515e1b2-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.440401 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e9b6916-0913-445b-8e5d-6a7f397dc9ba-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.867434 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gpxkh" event={"ID":"8e9b6916-0913-445b-8e5d-6a7f397dc9ba","Type":"ContainerDied","Data":"a81060b2f94a533fd9c05f8cb34fef8288c00fe60272b528c8b99c10ad97a836"} Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.867480 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a81060b2f94a533fd9c05f8cb34fef8288c00fe60272b528c8b99c10ad97a836" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.867550 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-gpxkh" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.883193 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-959f6" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.884382 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-959f6" event={"ID":"c6afbf28-95dd-4597-ac5d-f3735515e1b2","Type":"ContainerDied","Data":"019c0765a3bd2054346bfebd953ae7168971516129801f50ae1d84be7e272ab2"} Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.884445 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="019c0765a3bd2054346bfebd953ae7168971516129801f50ae1d84be7e272ab2" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.989511 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-854dc8db7d-j5l6c"] Nov 26 14:40:39 crc kubenswrapper[5037]: E1126 14:40:39.989962 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c84209aa-144f-4082-88b0-c83eb7e57f24" containerName="dnsmasq-dns" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.989983 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="c84209aa-144f-4082-88b0-c83eb7e57f24" containerName="dnsmasq-dns" Nov 26 14:40:39 crc kubenswrapper[5037]: E1126 14:40:39.989993 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6afbf28-95dd-4597-ac5d-f3735515e1b2" containerName="keystone-bootstrap" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.990003 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6afbf28-95dd-4597-ac5d-f3735515e1b2" containerName="keystone-bootstrap" Nov 26 14:40:39 crc kubenswrapper[5037]: E1126 14:40:39.990021 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e9b6916-0913-445b-8e5d-6a7f397dc9ba" containerName="placement-db-sync" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.990029 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e9b6916-0913-445b-8e5d-6a7f397dc9ba" containerName="placement-db-sync" Nov 26 14:40:39 crc kubenswrapper[5037]: E1126 14:40:39.990050 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c84209aa-144f-4082-88b0-c83eb7e57f24" containerName="init" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.990057 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="c84209aa-144f-4082-88b0-c83eb7e57f24" containerName="init" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.990279 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e9b6916-0913-445b-8e5d-6a7f397dc9ba" containerName="placement-db-sync" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.990326 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="c84209aa-144f-4082-88b0-c83eb7e57f24" containerName="dnsmasq-dns" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.990341 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6afbf28-95dd-4597-ac5d-f3735515e1b2" containerName="keystone-bootstrap" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.991400 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.999187 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.999345 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.999392 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-pq7hs" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.999417 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 26 14:40:39 crc kubenswrapper[5037]: I1126 14:40:39.999537 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.015320 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-854dc8db7d-j5l6c"] Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.055169 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kggbc\" (UniqueName: \"kubernetes.io/projected/c2d75a18-6446-4558-af57-c6e0c957fc3b-kube-api-access-kggbc\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.055225 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-scripts\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.055279 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-internal-tls-certs\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.055364 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-combined-ca-bundle\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.055428 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-public-tls-certs\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.055464 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-config-data\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 
14:40:40.055483 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2d75a18-6446-4558-af57-c6e0c957fc3b-logs\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.067559 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-fb548d49-hf8zh"] Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.068993 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.074022 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.074269 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.074464 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.074634 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.075494 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-fpv7w" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.078010 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.093653 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-fb548d49-hf8zh"] Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.160574 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kggbc\" (UniqueName: \"kubernetes.io/projected/c2d75a18-6446-4558-af57-c6e0c957fc3b-kube-api-access-kggbc\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.160618 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-scripts\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.160660 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-config-data\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.160686 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-internal-tls-certs\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.160730 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-internal-tls-certs\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.160753 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-credential-keys\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.160795 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-combined-ca-bundle\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.160829 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbtq5\" (UniqueName: \"kubernetes.io/projected/fe13f626-50c7-4ec3-b967-20f038731571-kube-api-access-bbtq5\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.160855 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-scripts\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.160883 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-public-tls-certs\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.160913 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-config-data\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.160934 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2d75a18-6446-4558-af57-c6e0c957fc3b-logs\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.160961 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-public-tls-certs\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.161015 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-combined-ca-bundle\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.161060 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-fernet-keys\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.161608 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2d75a18-6446-4558-af57-c6e0c957fc3b-logs\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.165056 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-internal-tls-certs\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.165096 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-combined-ca-bundle\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.166046 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-scripts\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.166215 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-config-data\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.167114 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-public-tls-certs\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.180441 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kggbc\" (UniqueName: \"kubernetes.io/projected/c2d75a18-6446-4558-af57-c6e0c957fc3b-kube-api-access-kggbc\") pod \"placement-854dc8db7d-j5l6c\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") " pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.262872 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-public-tls-certs\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " 
pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.263594 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-combined-ca-bundle\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.263639 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-fernet-keys\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.263687 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-config-data\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.263703 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-internal-tls-certs\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.263731 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-credential-keys\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.263812 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbtq5\" (UniqueName: \"kubernetes.io/projected/fe13f626-50c7-4ec3-b967-20f038731571-kube-api-access-bbtq5\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.263831 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-scripts\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.267708 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-combined-ca-bundle\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.268193 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-scripts\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.268323 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-config-data\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.269210 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-credential-keys\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.269468 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-internal-tls-certs\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.271148 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-public-tls-certs\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.283587 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbtq5\" (UniqueName: \"kubernetes.io/projected/fe13f626-50c7-4ec3-b967-20f038731571-kube-api-access-bbtq5\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.291043 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-fernet-keys\") pod \"keystone-fb548d49-hf8zh\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.318490 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.320979 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-pmjr8" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.402789 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.402995 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.416031 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.435423 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.435809 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.442415 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.468410 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5141f29f-7b8b-493c-9d73-398f66ea4ab1-db-sync-config-data\") pod \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\" (UID: \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\") " Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.468603 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mlwm\" (UniqueName: \"kubernetes.io/projected/5141f29f-7b8b-493c-9d73-398f66ea4ab1-kube-api-access-9mlwm\") pod \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\" (UID: \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\") " Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.468778 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5141f29f-7b8b-493c-9d73-398f66ea4ab1-combined-ca-bundle\") pod \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\" (UID: \"5141f29f-7b8b-493c-9d73-398f66ea4ab1\") " Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.477536 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.478891 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5141f29f-7b8b-493c-9d73-398f66ea4ab1-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "5141f29f-7b8b-493c-9d73-398f66ea4ab1" (UID: "5141f29f-7b8b-493c-9d73-398f66ea4ab1"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.485653 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5141f29f-7b8b-493c-9d73-398f66ea4ab1-kube-api-access-9mlwm" (OuterVolumeSpecName: "kube-api-access-9mlwm") pod "5141f29f-7b8b-493c-9d73-398f66ea4ab1" (UID: "5141f29f-7b8b-493c-9d73-398f66ea4ab1"). InnerVolumeSpecName "kube-api-access-9mlwm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.526774 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5141f29f-7b8b-493c-9d73-398f66ea4ab1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5141f29f-7b8b-493c-9d73-398f66ea4ab1" (UID: "5141f29f-7b8b-493c-9d73-398f66ea4ab1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.536075 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.546193 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.572719 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mlwm\" (UniqueName: \"kubernetes.io/projected/5141f29f-7b8b-493c-9d73-398f66ea4ab1-kube-api-access-9mlwm\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.572989 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5141f29f-7b8b-493c-9d73-398f66ea4ab1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.573158 5037 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5141f29f-7b8b-493c-9d73-398f66ea4ab1-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.815423 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-854dc8db7d-j5l6c"] Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.901570 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pmjr8" event={"ID":"5141f29f-7b8b-493c-9d73-398f66ea4ab1","Type":"ContainerDied","Data":"fec0b63a07134a23977bcb47b2ca874361008c6c753a0938700b4f7b6cb98776"} Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.902077 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fec0b63a07134a23977bcb47b2ca874361008c6c753a0938700b4f7b6cb98776" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.902162 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-pmjr8" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.919210 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-854dc8db7d-j5l6c" event={"ID":"c2d75a18-6446-4558-af57-c6e0c957fc3b","Type":"ContainerStarted","Data":"23c68f217ca0c448f87dae9e2be4b25a2fb59c81999e180f1fb5799aa26a1180"} Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.919250 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.919457 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.919510 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.919684 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 14:40:40 crc kubenswrapper[5037]: I1126 14:40:40.944782 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-fb548d49-hf8zh"] Nov 26 14:40:40 crc kubenswrapper[5037]: W1126 14:40:40.956302 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe13f626_50c7_4ec3_b967_20f038731571.slice/crio-b56f68978e382944afd896d3bf1e868b543a2597c945b02949a81cc88ef83c5a WatchSource:0}: Error finding container b56f68978e382944afd896d3bf1e868b543a2597c945b02949a81cc88ef83c5a: Status 404 returned error can't find the container with id b56f68978e382944afd896d3bf1e868b543a2597c945b02949a81cc88ef83c5a Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.183350 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7c767587b5-nzlv9"] Nov 26 14:40:41 crc kubenswrapper[5037]: E1126 14:40:41.183879 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5141f29f-7b8b-493c-9d73-398f66ea4ab1" containerName="barbican-db-sync" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.183905 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="5141f29f-7b8b-493c-9d73-398f66ea4ab1" containerName="barbican-db-sync" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.184132 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="5141f29f-7b8b-493c-9d73-398f66ea4ab1" containerName="barbican-db-sync" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.185465 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.193718 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.193919 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.194507 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-9lcbd" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.219260 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7c767587b5-nzlv9"] Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.246837 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-7ddc4956b6-dfqsv"] Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.248887 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.250087 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.250127 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.250166 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.253227 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.254457 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.254546 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" gracePeriod=600 Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.291195 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-config-data\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc 
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.291515 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19ae84d4-26f8-4e11-bd01-da880def5547-logs\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv"
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.291536 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxs72\" (UniqueName: \"kubernetes.io/projected/6d49cc40-ce20-415f-a979-398430c2bd81-kube-api-access-zxs72\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9"
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.291650 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-combined-ca-bundle\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv"
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.291845 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kb9w4\" (UniqueName: \"kubernetes.io/projected/19ae84d4-26f8-4e11-bd01-da880def5547-kube-api-access-kb9w4\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv"
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.291926 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-combined-ca-bundle\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9"
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.291952 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d49cc40-ce20-415f-a979-398430c2bd81-logs\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9"
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.292018 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-config-data-custom\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9"
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.292108 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-config-data\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9"
(UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-config-data-custom\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.322330 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7ddc4956b6-dfqsv"] Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.379960 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8b9b87645-k5scf"] Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.381453 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.393705 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-config-data\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.393780 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxs72\" (UniqueName: \"kubernetes.io/projected/6d49cc40-ce20-415f-a979-398430c2bd81-kube-api-access-zxs72\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.393801 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19ae84d4-26f8-4e11-bd01-da880def5547-logs\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.393836 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-combined-ca-bundle\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.393870 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kb9w4\" (UniqueName: \"kubernetes.io/projected/19ae84d4-26f8-4e11-bd01-da880def5547-kube-api-access-kb9w4\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.393896 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-combined-ca-bundle\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.393912 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d49cc40-ce20-415f-a979-398430c2bd81-logs\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: 
\"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.393931 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-config-data-custom\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.393962 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-config-data\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.393979 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-config-data-custom\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.395715 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d49cc40-ce20-415f-a979-398430c2bd81-logs\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.399534 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19ae84d4-26f8-4e11-bd01-da880def5547-logs\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.408197 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-combined-ca-bundle\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.408240 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-config-data-custom\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.408273 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b9b87645-k5scf"] Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.419659 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-config-data\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc kubenswrapper[5037]: E1126 14:40:41.423370 5037 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.425938 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kb9w4\" (UniqueName: \"kubernetes.io/projected/19ae84d4-26f8-4e11-bd01-da880def5547-kube-api-access-kb9w4\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.426083 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-combined-ca-bundle\") pod \"barbican-keystone-listener-7ddc4956b6-dfqsv\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.427264 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-config-data\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.436359 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-config-data-custom\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.458912 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxs72\" (UniqueName: \"kubernetes.io/projected/6d49cc40-ce20-415f-a979-398430c2bd81-kube-api-access-zxs72\") pod \"barbican-worker-7c767587b5-nzlv9\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.483203 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-84d74cb456-scxkz"] Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.486966 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.489523 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.496621 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-84d74cb456-scxkz"] Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.522040 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.522072 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-config\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.522123 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-dns-svc\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.522150 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25vsn\" (UniqueName: \"kubernetes.io/projected/e5272984-1396-4916-b718-6ea94513f800-kube-api-access-25vsn\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.522193 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-ovsdbserver-nb\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.522271 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-dns-swift-storage-0\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.522341 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-ovsdbserver-sb\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.604737 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.624471 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-ovsdbserver-sb\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.624729 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-config-data\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.624770 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-config\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.624793 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-dns-svc\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.624815 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25vsn\" (UniqueName: \"kubernetes.io/projected/e5272984-1396-4916-b718-6ea94513f800-kube-api-access-25vsn\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.624853 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-ovsdbserver-nb\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.624875 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpn6r\" (UniqueName: \"kubernetes.io/projected/9040dbae-017f-4cc9-98c0-6b0228cfa220-kube-api-access-vpn6r\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.624936 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-dns-swift-storage-0\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.624968 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9040dbae-017f-4cc9-98c0-6b0228cfa220-logs\") pod \"barbican-api-84d74cb456-scxkz\" (UID: 
\"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.624983 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-combined-ca-bundle\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.625002 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-config-data-custom\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.629652 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-ovsdbserver-sb\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.629948 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-dns-svc\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.630105 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-config\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.630204 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-ovsdbserver-nb\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.634299 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-dns-swift-storage-0\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.642520 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25vsn\" (UniqueName: \"kubernetes.io/projected/e5272984-1396-4916-b718-6ea94513f800-kube-api-access-25vsn\") pod \"dnsmasq-dns-8b9b87645-k5scf\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.656274 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.726586 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9040dbae-017f-4cc9-98c0-6b0228cfa220-logs\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.726630 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-combined-ca-bundle\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.726651 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-config-data-custom\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.726696 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-config-data\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.726795 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpn6r\" (UniqueName: \"kubernetes.io/projected/9040dbae-017f-4cc9-98c0-6b0228cfa220-kube-api-access-vpn6r\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.728130 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9040dbae-017f-4cc9-98c0-6b0228cfa220-logs\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.736765 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-combined-ca-bundle\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.747406 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-config-data-custom\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.749706 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpn6r\" (UniqueName: \"kubernetes.io/projected/9040dbae-017f-4cc9-98c0-6b0228cfa220-kube-api-access-vpn6r\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz" 
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.774655 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-config-data\") pod \"barbican-api-84d74cb456-scxkz\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " pod="openstack/barbican-api-84d74cb456-scxkz"
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.965118 5037 generic.go:334] "Generic (PLEG): container finished" podID="78b7adc8-c410-4ccf-948a-0d968e60d8b7" containerID="054c2f78fc498fecc3b64d7998923b3399f2ef4cd85d88ab4a737286fbb32ff5" exitCode=0
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.965234 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-78lfm" event={"ID":"78b7adc8-c410-4ccf-948a-0d968e60d8b7","Type":"ContainerDied","Data":"054c2f78fc498fecc3b64d7998923b3399f2ef4cd85d88ab4a737286fbb32ff5"}
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.972246 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-84d74cb456-scxkz"
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.977276 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-fb548d49-hf8zh" event={"ID":"fe13f626-50c7-4ec3-b967-20f038731571","Type":"ContainerStarted","Data":"ca5593d895153686d827f3a444c0ce51200735ce910a9e9d65ec173d66664c8b"}
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.977336 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-fb548d49-hf8zh" event={"ID":"fe13f626-50c7-4ec3-b967-20f038731571","Type":"ContainerStarted","Data":"b56f68978e382944afd896d3bf1e868b543a2597c945b02949a81cc88ef83c5a"}
Nov 26 14:40:41 crc kubenswrapper[5037]: I1126 14:40:41.978508 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-fb548d49-hf8zh"
Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 14:40:41.997648 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-854dc8db7d-j5l6c" event={"ID":"c2d75a18-6446-4558-af57-c6e0c957fc3b","Type":"ContainerStarted","Data":"ae38d038fad3bbc384e79c4d7f1e060c20c2d38b3e29519ec6a7891fc4ff742b"}
Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 14:40:41.997685 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-854dc8db7d-j5l6c" event={"ID":"c2d75a18-6446-4558-af57-c6e0c957fc3b","Type":"ContainerStarted","Data":"98e55e6e1008fca6ad27dbc8db97cf30687f5fe52197409ea9b8d138f9f80df2"}
Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 14:40:41.997700 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-854dc8db7d-j5l6c"
Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 14:40:41.997711 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-854dc8db7d-j5l6c"
Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 14:40:42.016262 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" exitCode=0
Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 14:40:42.016346 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181"}
scope.go:117] "RemoveContainer" containerID="302cbe16bdb6c8873822bf0697d168f893d8457e80a7e1227846608f32db69c8" Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 14:40:42.016721 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:40:42 crc kubenswrapper[5037]: E1126 14:40:42.016909 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 14:40:42.037118 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-fb548d49-hf8zh" podStartSLOduration=2.037098603 podStartE2EDuration="2.037098603s" podCreationTimestamp="2025-11-26 14:40:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:41.998633014 +0000 UTC m=+1508.795403198" watchObservedRunningTime="2025-11-26 14:40:42.037098603 +0000 UTC m=+1508.833868787" Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 14:40:42.081623 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-854dc8db7d-j5l6c" podStartSLOduration=3.081576338 podStartE2EDuration="3.081576338s" podCreationTimestamp="2025-11-26 14:40:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:42.029780134 +0000 UTC m=+1508.826550318" watchObservedRunningTime="2025-11-26 14:40:42.081576338 +0000 UTC m=+1508.878367373" Nov 26 14:40:42 crc kubenswrapper[5037]: W1126 14:40:42.096323 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d49cc40_ce20_415f_a979_398430c2bd81.slice/crio-dcb533ef817930e484aa34688ca5ad57eb45ee0e14ade15d278098b83b1288a0 WatchSource:0}: Error finding container dcb533ef817930e484aa34688ca5ad57eb45ee0e14ade15d278098b83b1288a0: Status 404 returned error can't find the container with id dcb533ef817930e484aa34688ca5ad57eb45ee0e14ade15d278098b83b1288a0 Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 14:40:42.151244 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7c767587b5-nzlv9"] Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 14:40:42.231587 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7ddc4956b6-dfqsv"] Nov 26 14:40:42 crc kubenswrapper[5037]: W1126 14:40:42.237551 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19ae84d4_26f8_4e11_bd01_da880def5547.slice/crio-66b2d7343a60d8adf5a02b27199fe1a2a6aa37340e7e0a224cd54e178699dcc7 WatchSource:0}: Error finding container 66b2d7343a60d8adf5a02b27199fe1a2a6aa37340e7e0a224cd54e178699dcc7: Status 404 returned error can't find the container with id 66b2d7343a60d8adf5a02b27199fe1a2a6aa37340e7e0a224cd54e178699dcc7 Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 14:40:42.245387 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b9b87645-k5scf"] Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 
Nov 26 14:40:42 crc kubenswrapper[5037]: I1126 14:40:42.654698 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-84d74cb456-scxkz"]
Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.040310 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" event={"ID":"19ae84d4-26f8-4e11-bd01-da880def5547","Type":"ContainerStarted","Data":"66b2d7343a60d8adf5a02b27199fe1a2a6aa37340e7e0a224cd54e178699dcc7"}
Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.042948 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84d74cb456-scxkz" event={"ID":"9040dbae-017f-4cc9-98c0-6b0228cfa220","Type":"ContainerStarted","Data":"df4a5b138e39379e1becb18718f2bb4ed479d3c758ce519599b163cabe3455f8"}
Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.043006 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84d74cb456-scxkz" event={"ID":"9040dbae-017f-4cc9-98c0-6b0228cfa220","Type":"ContainerStarted","Data":"216435e2783b1861caaef88c957ae844c4019d4fbd2ce389b4f2288680f02775"}
Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.049840 5037 generic.go:334] "Generic (PLEG): container finished" podID="e5272984-1396-4916-b718-6ea94513f800" containerID="e7884b7e25d641abbd68dd90f60e270d79fef021cba18c85ec634748aa808818" exitCode=0
Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.049904 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b9b87645-k5scf" event={"ID":"e5272984-1396-4916-b718-6ea94513f800","Type":"ContainerDied","Data":"e7884b7e25d641abbd68dd90f60e270d79fef021cba18c85ec634748aa808818"}
Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.049922 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b9b87645-k5scf" event={"ID":"e5272984-1396-4916-b718-6ea94513f800","Type":"ContainerStarted","Data":"a5cf4b0091e69e0b16f8a5ccca03a823c4ee02144ff06d998b65d8464983cdbf"}
Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.052808 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c767587b5-nzlv9" event={"ID":"6d49cc40-ce20-415f-a979-398430c2bd81","Type":"ContainerStarted","Data":"dcb533ef817930e484aa34688ca5ad57eb45ee0e14ade15d278098b83b1288a0"}
Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.062559 5037 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.063862 5037 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.063885 5037 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Need to start a new one" pod="openstack/neutron-db-sync-78lfm" Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.392053 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pmtj4\" (UniqueName: \"kubernetes.io/projected/78b7adc8-c410-4ccf-948a-0d968e60d8b7-kube-api-access-pmtj4\") pod \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\" (UID: \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\") " Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.392220 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/78b7adc8-c410-4ccf-948a-0d968e60d8b7-config\") pod \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\" (UID: \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\") " Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.392248 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78b7adc8-c410-4ccf-948a-0d968e60d8b7-combined-ca-bundle\") pod \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\" (UID: \"78b7adc8-c410-4ccf-948a-0d968e60d8b7\") " Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.396813 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78b7adc8-c410-4ccf-948a-0d968e60d8b7-kube-api-access-pmtj4" (OuterVolumeSpecName: "kube-api-access-pmtj4") pod "78b7adc8-c410-4ccf-948a-0d968e60d8b7" (UID: "78b7adc8-c410-4ccf-948a-0d968e60d8b7"). InnerVolumeSpecName "kube-api-access-pmtj4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.416495 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78b7adc8-c410-4ccf-948a-0d968e60d8b7-config" (OuterVolumeSpecName: "config") pod "78b7adc8-c410-4ccf-948a-0d968e60d8b7" (UID: "78b7adc8-c410-4ccf-948a-0d968e60d8b7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.418796 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78b7adc8-c410-4ccf-948a-0d968e60d8b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "78b7adc8-c410-4ccf-948a-0d968e60d8b7" (UID: "78b7adc8-c410-4ccf-948a-0d968e60d8b7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.494399 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pmtj4\" (UniqueName: \"kubernetes.io/projected/78b7adc8-c410-4ccf-948a-0d968e60d8b7-kube-api-access-pmtj4\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.494446 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/78b7adc8-c410-4ccf-948a-0d968e60d8b7-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.494456 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78b7adc8-c410-4ccf-948a-0d968e60d8b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.639461 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.639817 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.784192 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:43 crc kubenswrapper[5037]: I1126 14:40:43.880690 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.075896 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84d74cb456-scxkz" event={"ID":"9040dbae-017f-4cc9-98c0-6b0228cfa220","Type":"ContainerStarted","Data":"a46c24c1c373c3d41cfd0b561ce04fa933d6fc18ae7e8258658363f0d60384c5"} Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.077616 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.077665 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.082232 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b9b87645-k5scf" event={"ID":"e5272984-1396-4916-b718-6ea94513f800","Type":"ContainerStarted","Data":"240368674a9bbf55c4109aaba92d41f47d292ee7b9c4d2af09988af29b18b231"} Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.084586 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.087647 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-78lfm" event={"ID":"78b7adc8-c410-4ccf-948a-0d968e60d8b7","Type":"ContainerDied","Data":"a142a543ef1470e50dbf56002053c49af3bc0ad8110f56bca7196341b4c7b2bb"} Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.087718 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a142a543ef1470e50dbf56002053c49af3bc0ad8110f56bca7196341b4c7b2bb" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.088373 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-78lfm" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.124466 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-84d74cb456-scxkz" podStartSLOduration=3.1244467 podStartE2EDuration="3.1244467s" podCreationTimestamp="2025-11-26 14:40:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:44.109013534 +0000 UTC m=+1510.905783738" watchObservedRunningTime="2025-11-26 14:40:44.1244467 +0000 UTC m=+1510.921216884" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.169868 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8b9b87645-k5scf" podStartSLOduration=3.169848299 podStartE2EDuration="3.169848299s" podCreationTimestamp="2025-11-26 14:40:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:44.137758556 +0000 UTC m=+1510.934528740" watchObservedRunningTime="2025-11-26 14:40:44.169848299 +0000 UTC m=+1510.966618483" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.200348 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b9b87645-k5scf"] Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.247331 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-66b66f7449-2q2xk"] Nov 26 14:40:44 crc kubenswrapper[5037]: E1126 14:40:44.254524 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78b7adc8-c410-4ccf-948a-0d968e60d8b7" containerName="neutron-db-sync" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.254555 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="78b7adc8-c410-4ccf-948a-0d968e60d8b7" containerName="neutron-db-sync" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.255019 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="78b7adc8-c410-4ccf-948a-0d968e60d8b7" containerName="neutron-db-sync" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.256404 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.281567 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66b66f7449-2q2xk"] Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.299370 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-64b48fff64-cppwc"] Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.301253 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.303614 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.304339 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.304516 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.304998 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-rt4gf" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.330725 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x85nl\" (UniqueName: \"kubernetes.io/projected/298b5c3b-8afb-4805-90cc-6e13fa47f559-kube-api-access-x85nl\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.330765 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-dns-svc\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.330921 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-combined-ca-bundle\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.331316 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-config\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.331359 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-ovndb-tls-certs\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.331386 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-ovsdbserver-sb\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.331421 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5bmh\" (UniqueName: \"kubernetes.io/projected/698376ab-e89f-4577-9bee-c562e82b32ba-kube-api-access-f5bmh\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " 
pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.331453 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-ovsdbserver-nb\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.331676 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-httpd-config\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.331747 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-dns-swift-storage-0\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.331833 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-config\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.333120 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-64b48fff64-cppwc"] Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.436027 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-ovndb-tls-certs\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.436088 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-ovsdbserver-sb\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.436120 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5bmh\" (UniqueName: \"kubernetes.io/projected/698376ab-e89f-4577-9bee-c562e82b32ba-kube-api-access-f5bmh\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.436150 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-ovsdbserver-nb\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.436191 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-httpd-config\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.436233 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-dns-swift-storage-0\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.436271 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-config\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.436350 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x85nl\" (UniqueName: \"kubernetes.io/projected/298b5c3b-8afb-4805-90cc-6e13fa47f559-kube-api-access-x85nl\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.436370 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-dns-svc\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.436405 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-combined-ca-bundle\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.436444 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-config\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.437254 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-config\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.439043 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-ovsdbserver-nb\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.439314 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-dns-swift-storage-0\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: 
\"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.439658 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-dns-svc\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.441753 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-ovsdbserver-sb\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.451127 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-ovndb-tls-certs\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.451719 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-httpd-config\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.461378 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-combined-ca-bundle\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.464134 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-config\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.466401 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x85nl\" (UniqueName: \"kubernetes.io/projected/298b5c3b-8afb-4805-90cc-6e13fa47f559-kube-api-access-x85nl\") pod \"neutron-64b48fff64-cppwc\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.484846 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5bmh\" (UniqueName: \"kubernetes.io/projected/698376ab-e89f-4577-9bee-c562e82b32ba-kube-api-access-f5bmh\") pod \"dnsmasq-dns-66b66f7449-2q2xk\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.601997 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.631581 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.633733 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7978b45fdd-7t6zc"] Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.637633 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.642577 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.642669 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.647075 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7978b45fdd-7t6zc"] Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.741127 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-logs\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.741414 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-public-tls-certs\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.741441 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-config-data\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.741544 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-295fx\" (UniqueName: \"kubernetes.io/projected/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-kube-api-access-295fx\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.741596 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-config-data-custom\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.741620 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-combined-ca-bundle\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.741638 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-internal-tls-certs\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.843710 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-logs\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.843773 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-public-tls-certs\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.843789 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-config-data\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.843894 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-295fx\" (UniqueName: \"kubernetes.io/projected/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-kube-api-access-295fx\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.843966 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-config-data-custom\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.844006 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-combined-ca-bundle\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.844026 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-internal-tls-certs\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.844096 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-logs\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.849475 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-internal-tls-certs\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.849507 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-config-data-custom\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.857613 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-combined-ca-bundle\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.860239 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-295fx\" (UniqueName: \"kubernetes.io/projected/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-kube-api-access-295fx\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.860691 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-public-tls-certs\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.862984 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-config-data\") pod \"barbican-api-7978b45fdd-7t6zc\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:44 crc kubenswrapper[5037]: I1126 14:40:44.963660 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:45 crc kubenswrapper[5037]: I1126 14:40:45.929431 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7978b45fdd-7t6zc"] Nov 26 14:40:45 crc kubenswrapper[5037]: I1126 14:40:45.950910 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-64b48fff64-cppwc"] Nov 26 14:40:45 crc kubenswrapper[5037]: I1126 14:40:45.960520 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66b66f7449-2q2xk"] Nov 26 14:40:46 crc kubenswrapper[5037]: I1126 14:40:46.106173 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" event={"ID":"19ae84d4-26f8-4e11-bd01-da880def5547","Type":"ContainerStarted","Data":"355cc9901e399458175cd4640ef40324803629a86ea9a4d2abc2824da07c4f8d"} Nov 26 14:40:46 crc kubenswrapper[5037]: I1126 14:40:46.108209 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c767587b5-nzlv9" event={"ID":"6d49cc40-ce20-415f-a979-398430c2bd81","Type":"ContainerStarted","Data":"08aa4f4dbe17185b559c1307060da7ba09ed7694916c81cee021536293b3f886"} Nov 26 14:40:46 crc kubenswrapper[5037]: I1126 14:40:46.108243 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c767587b5-nzlv9" event={"ID":"6d49cc40-ce20-415f-a979-398430c2bd81","Type":"ContainerStarted","Data":"88558c083c5cd020dbbbc7911d8c1ff0846d988d99c33563252e02c9bde2f0cf"} Nov 26 14:40:46 crc kubenswrapper[5037]: I1126 14:40:46.109088 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8b9b87645-k5scf" podUID="e5272984-1396-4916-b718-6ea94513f800" containerName="dnsmasq-dns" containerID="cri-o://240368674a9bbf55c4109aaba92d41f47d292ee7b9c4d2af09988af29b18b231" gracePeriod=10 Nov 26 14:40:46 crc kubenswrapper[5037]: I1126 14:40:46.131895 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7c767587b5-nzlv9" podStartSLOduration=1.911178907 podStartE2EDuration="5.131879878s" podCreationTimestamp="2025-11-26 14:40:41 +0000 UTC" firstStartedPulling="2025-11-26 14:40:42.112202886 +0000 UTC m=+1508.908973070" lastFinishedPulling="2025-11-26 14:40:45.332903857 +0000 UTC m=+1512.129674041" observedRunningTime="2025-11-26 14:40:46.13152765 +0000 UTC m=+1512.928297834" watchObservedRunningTime="2025-11-26 14:40:46.131879878 +0000 UTC m=+1512.928650062" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.119392 5037 generic.go:334] "Generic (PLEG): container finished" podID="e5272984-1396-4916-b718-6ea94513f800" containerID="240368674a9bbf55c4109aaba92d41f47d292ee7b9c4d2af09988af29b18b231" exitCode=0 Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.119449 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b9b87645-k5scf" event={"ID":"e5272984-1396-4916-b718-6ea94513f800","Type":"ContainerDied","Data":"240368674a9bbf55c4109aaba92d41f47d292ee7b9c4d2af09988af29b18b231"} Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.629790 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7ccc6df59c-m5tjx"] Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.631715 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.633793 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.635900 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.654939 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7ccc6df59c-m5tjx"] Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.807935 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-internal-tls-certs\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.807975 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzbxh\" (UniqueName: \"kubernetes.io/projected/a97b4f35-04a7-47c3-a658-170645023de6-kube-api-access-rzbxh\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.808003 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-ovndb-tls-certs\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.808038 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-public-tls-certs\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.808057 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-combined-ca-bundle\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.808140 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-config\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.808157 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-httpd-config\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.910273 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-internal-tls-certs\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.910363 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzbxh\" (UniqueName: \"kubernetes.io/projected/a97b4f35-04a7-47c3-a658-170645023de6-kube-api-access-rzbxh\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.910400 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-ovndb-tls-certs\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.910447 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-public-tls-certs\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.910474 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-combined-ca-bundle\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.910574 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-config\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.910600 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-httpd-config\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.918482 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-config\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.919266 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-ovndb-tls-certs\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.921322 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-internal-tls-certs\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " 
pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.924756 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-httpd-config\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.928531 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-public-tls-certs\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.926982 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-combined-ca-bundle\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.928269 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzbxh\" (UniqueName: \"kubernetes.io/projected/a97b4f35-04a7-47c3-a658-170645023de6-kube-api-access-rzbxh\") pod \"neutron-7ccc6df59c-m5tjx\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:47 crc kubenswrapper[5037]: I1126 14:40:47.958854 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.154104 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" event={"ID":"698376ab-e89f-4577-9bee-c562e82b32ba","Type":"ContainerStarted","Data":"ad8fd0a94ef2d905262b31754016f67d4907aa8555e998ed37bbd8c6b1746f2e"} Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.155654 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64b48fff64-cppwc" event={"ID":"298b5c3b-8afb-4805-90cc-6e13fa47f559","Type":"ContainerStarted","Data":"78c1ca84313ad54257e28c624bc8832f63af0838347c1bedb6b8ffcb27871418"} Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.286108 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.464196 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-config\") pod \"e5272984-1396-4916-b718-6ea94513f800\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.464440 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25vsn\" (UniqueName: \"kubernetes.io/projected/e5272984-1396-4916-b718-6ea94513f800-kube-api-access-25vsn\") pod \"e5272984-1396-4916-b718-6ea94513f800\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.464545 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-ovsdbserver-nb\") pod \"e5272984-1396-4916-b718-6ea94513f800\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.464678 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-dns-swift-storage-0\") pod \"e5272984-1396-4916-b718-6ea94513f800\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.464762 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-ovsdbserver-sb\") pod \"e5272984-1396-4916-b718-6ea94513f800\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.464940 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-dns-svc\") pod \"e5272984-1396-4916-b718-6ea94513f800\" (UID: \"e5272984-1396-4916-b718-6ea94513f800\") " Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.467660 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5272984-1396-4916-b718-6ea94513f800-kube-api-access-25vsn" (OuterVolumeSpecName: "kube-api-access-25vsn") pod "e5272984-1396-4916-b718-6ea94513f800" (UID: "e5272984-1396-4916-b718-6ea94513f800"). InnerVolumeSpecName "kube-api-access-25vsn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.510721 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e5272984-1396-4916-b718-6ea94513f800" (UID: "e5272984-1396-4916-b718-6ea94513f800"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.516113 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e5272984-1396-4916-b718-6ea94513f800" (UID: "e5272984-1396-4916-b718-6ea94513f800"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.517514 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e5272984-1396-4916-b718-6ea94513f800" (UID: "e5272984-1396-4916-b718-6ea94513f800"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.522497 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-config" (OuterVolumeSpecName: "config") pod "e5272984-1396-4916-b718-6ea94513f800" (UID: "e5272984-1396-4916-b718-6ea94513f800"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.538653 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e5272984-1396-4916-b718-6ea94513f800" (UID: "e5272984-1396-4916-b718-6ea94513f800"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.566919 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.566957 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.566971 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25vsn\" (UniqueName: \"kubernetes.io/projected/e5272984-1396-4916-b718-6ea94513f800-kube-api-access-25vsn\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.566985 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.566996 5037 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:50 crc kubenswrapper[5037]: I1126 14:40:50.567008 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e5272984-1396-4916-b718-6ea94513f800-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:51 crc kubenswrapper[5037]: I1126 14:40:51.165828 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" event={"ID":"698376ab-e89f-4577-9bee-c562e82b32ba","Type":"ContainerStarted","Data":"806277a00ac5191908812744ccfa1f0988a36d9c73183097577b0ffa8fa3e35b"} Nov 26 14:40:51 crc kubenswrapper[5037]: I1126 14:40:51.168713 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64b48fff64-cppwc" 
event={"ID":"298b5c3b-8afb-4805-90cc-6e13fa47f559","Type":"ContainerStarted","Data":"b4375d3a48b5220ef835c2698b095719bcb6787ea1adbe7ef2c0d8398408bf27"} Nov 26 14:40:51 crc kubenswrapper[5037]: I1126 14:40:51.170569 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7978b45fdd-7t6zc" event={"ID":"334f3bb7-793e-4cff-b0ef-de24dc8a46b5","Type":"ContainerStarted","Data":"8556bd1ed449e1303e1a497174956d6682f8d0558538d369de30eb6d4ea4b300"} Nov 26 14:40:51 crc kubenswrapper[5037]: I1126 14:40:51.172689 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b9b87645-k5scf" event={"ID":"e5272984-1396-4916-b718-6ea94513f800","Type":"ContainerDied","Data":"a5cf4b0091e69e0b16f8a5ccca03a823c4ee02144ff06d998b65d8464983cdbf"} Nov 26 14:40:51 crc kubenswrapper[5037]: I1126 14:40:51.172720 5037 scope.go:117] "RemoveContainer" containerID="240368674a9bbf55c4109aaba92d41f47d292ee7b9c4d2af09988af29b18b231" Nov 26 14:40:51 crc kubenswrapper[5037]: I1126 14:40:51.172811 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b9b87645-k5scf" Nov 26 14:40:51 crc kubenswrapper[5037]: I1126 14:40:51.315626 5037 scope.go:117] "RemoveContainer" containerID="e7884b7e25d641abbd68dd90f60e270d79fef021cba18c85ec634748aa808818" Nov 26 14:40:51 crc kubenswrapper[5037]: I1126 14:40:51.321041 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b9b87645-k5scf"] Nov 26 14:40:51 crc kubenswrapper[5037]: I1126 14:40:51.328103 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8b9b87645-k5scf"] Nov 26 14:40:51 crc kubenswrapper[5037]: I1126 14:40:51.352784 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7ccc6df59c-m5tjx"] Nov 26 14:40:51 crc kubenswrapper[5037]: E1126 14:40:51.699673 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" Nov 26 14:40:51 crc kubenswrapper[5037]: I1126 14:40:51.922202 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5272984-1396-4916-b718-6ea94513f800" path="/var/lib/kubelet/pods/e5272984-1396-4916-b718-6ea94513f800/volumes" Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.182762 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ccc6df59c-m5tjx" event={"ID":"a97b4f35-04a7-47c3-a658-170645023de6","Type":"ContainerStarted","Data":"d787d7c57b49308ce496dd3022253165f26b5f2096403db68cdd6ea85914b8a9"} Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.182805 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ccc6df59c-m5tjx" event={"ID":"a97b4f35-04a7-47c3-a658-170645023de6","Type":"ContainerStarted","Data":"25004b7d7570b0227e943b0f10767fefe0da178777c48537fb23de224173d062"} Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.182815 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ccc6df59c-m5tjx" event={"ID":"a97b4f35-04a7-47c3-a658-170645023de6","Type":"ContainerStarted","Data":"40d1a9355971e6bd47db0f88636ceaae55624b324fb80c87c755bb998fdbd44e"} Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.183093 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:40:52 crc 
kubenswrapper[5037]: I1126 14:40:52.187178 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jxxp2" event={"ID":"50b1873a-43ee-426d-99f2-84e8267cb178","Type":"ContainerStarted","Data":"7479fe1b8683d8b8da52186ee20697aea76b2ba23bac017886f41f497a93218e"} Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.189182 5037 generic.go:334] "Generic (PLEG): container finished" podID="698376ab-e89f-4577-9bee-c562e82b32ba" containerID="806277a00ac5191908812744ccfa1f0988a36d9c73183097577b0ffa8fa3e35b" exitCode=0 Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.189441 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" event={"ID":"698376ab-e89f-4577-9bee-c562e82b32ba","Type":"ContainerDied","Data":"806277a00ac5191908812744ccfa1f0988a36d9c73183097577b0ffa8fa3e35b"} Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.191475 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64b48fff64-cppwc" event={"ID":"298b5c3b-8afb-4805-90cc-6e13fa47f559","Type":"ContainerStarted","Data":"438c6201d6b1523ba7fbc43efafce89a346d81d24d2c43b9d63c88e3e6ba3ae5"} Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.191866 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.198970 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4","Type":"ContainerStarted","Data":"6d347ebb668ec0c1cc7aacba7d11563feb28e307e522d49a22909a1bea1debad"} Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.199402 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.199394 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerName="ceilometer-notification-agent" containerID="cri-o://7acd7256521600da6ac1e6813aa3e9cf78cfc3cd65ec85f50d2f45137be61eac" gracePeriod=30 Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.199432 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerName="proxy-httpd" containerID="cri-o://6d347ebb668ec0c1cc7aacba7d11563feb28e307e522d49a22909a1bea1debad" gracePeriod=30 Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.199529 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerName="sg-core" containerID="cri-o://9e5f3c5c90e9812c570ac0055351a47f991610d4575a96e8c965dcfe4537a190" gracePeriod=30 Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.208041 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" event={"ID":"19ae84d4-26f8-4e11-bd01-da880def5547","Type":"ContainerStarted","Data":"73ec95358a687154b2f7af7ab67ff687aabcbeb867fdaa97bcb29864cc40d8c1"} Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.209209 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7ccc6df59c-m5tjx" podStartSLOduration=5.209194834 podStartE2EDuration="5.209194834s" podCreationTimestamp="2025-11-26 14:40:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:52.20575846 +0000 UTC m=+1519.002528654" watchObservedRunningTime="2025-11-26 14:40:52.209194834 +0000 UTC m=+1519.005965018" Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.223438 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7978b45fdd-7t6zc" event={"ID":"334f3bb7-793e-4cff-b0ef-de24dc8a46b5","Type":"ContainerStarted","Data":"2f6e30bd74ea66c491e2959c075dfac83aa041657baf37104388b43c5d325007"} Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.223477 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7978b45fdd-7t6zc" event={"ID":"334f3bb7-793e-4cff-b0ef-de24dc8a46b5","Type":"ContainerStarted","Data":"b1db0ccf747c065689c039923c194e8419b6cd5a8c76ec974b99511a7ede0d79"} Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.224274 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.224317 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.246493 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-jxxp2" podStartSLOduration=3.065506661 podStartE2EDuration="47.246474123s" podCreationTimestamp="2025-11-26 14:40:05 +0000 UTC" firstStartedPulling="2025-11-26 14:40:07.059045206 +0000 UTC m=+1473.855815390" lastFinishedPulling="2025-11-26 14:40:51.240012668 +0000 UTC m=+1518.036782852" observedRunningTime="2025-11-26 14:40:52.223246907 +0000 UTC m=+1519.020017091" watchObservedRunningTime="2025-11-26 14:40:52.246474123 +0000 UTC m=+1519.043244307" Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.273035 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-64b48fff64-cppwc" podStartSLOduration=8.270594823 podStartE2EDuration="8.270594823s" podCreationTimestamp="2025-11-26 14:40:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:52.238018727 +0000 UTC m=+1519.034788921" watchObservedRunningTime="2025-11-26 14:40:52.270594823 +0000 UTC m=+1519.067365007" Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.324040 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7978b45fdd-7t6zc" podStartSLOduration=8.324004666 podStartE2EDuration="8.324004666s" podCreationTimestamp="2025-11-26 14:40:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:52.319098436 +0000 UTC m=+1519.115868620" watchObservedRunningTime="2025-11-26 14:40:52.324004666 +0000 UTC m=+1519.120774850" Nov 26 14:40:52 crc kubenswrapper[5037]: I1126 14:40:52.340923 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" podStartSLOduration=8.260861811 podStartE2EDuration="11.340908459s" podCreationTimestamp="2025-11-26 14:40:41 +0000 UTC" firstStartedPulling="2025-11-26 14:40:42.251745222 +0000 UTC m=+1509.048515406" lastFinishedPulling="2025-11-26 14:40:45.33179187 +0000 UTC m=+1512.128562054" observedRunningTime="2025-11-26 14:40:52.339225138 +0000 UTC m=+1519.135995322" watchObservedRunningTime="2025-11-26 14:40:52.340908459 +0000 UTC 
m=+1519.137678643" Nov 26 14:40:53 crc kubenswrapper[5037]: I1126 14:40:53.248172 5037 generic.go:334] "Generic (PLEG): container finished" podID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerID="6d347ebb668ec0c1cc7aacba7d11563feb28e307e522d49a22909a1bea1debad" exitCode=0 Nov 26 14:40:53 crc kubenswrapper[5037]: I1126 14:40:53.248217 5037 generic.go:334] "Generic (PLEG): container finished" podID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerID="9e5f3c5c90e9812c570ac0055351a47f991610d4575a96e8c965dcfe4537a190" exitCode=2 Nov 26 14:40:53 crc kubenswrapper[5037]: I1126 14:40:53.248246 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4","Type":"ContainerDied","Data":"6d347ebb668ec0c1cc7aacba7d11563feb28e307e522d49a22909a1bea1debad"} Nov 26 14:40:53 crc kubenswrapper[5037]: I1126 14:40:53.248322 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4","Type":"ContainerDied","Data":"9e5f3c5c90e9812c570ac0055351a47f991610d4575a96e8c965dcfe4537a190"} Nov 26 14:40:53 crc kubenswrapper[5037]: I1126 14:40:53.462427 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:53 crc kubenswrapper[5037]: I1126 14:40:53.719575 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:40:53 crc kubenswrapper[5037]: I1126 14:40:53.913569 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:40:53 crc kubenswrapper[5037]: E1126 14:40:53.913803 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:40:54 crc kubenswrapper[5037]: I1126 14:40:54.270562 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" event={"ID":"698376ab-e89f-4577-9bee-c562e82b32ba","Type":"ContainerStarted","Data":"78b901dad012794cdf465b35ea24402937fd90a8cdaac5e8afdb3f90cb70b5cb"} Nov 26 14:40:54 crc kubenswrapper[5037]: I1126 14:40:54.271509 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:54 crc kubenswrapper[5037]: I1126 14:40:54.293391 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" podStartSLOduration=10.293373684 podStartE2EDuration="10.293373684s" podCreationTimestamp="2025-11-26 14:40:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:40:54.292148984 +0000 UTC m=+1521.088919168" watchObservedRunningTime="2025-11-26 14:40:54.293373684 +0000 UTC m=+1521.090143888" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.293455 5037 generic.go:334] "Generic (PLEG): container finished" podID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerID="7acd7256521600da6ac1e6813aa3e9cf78cfc3cd65ec85f50d2f45137be61eac" exitCode=0 Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.293543 5037 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4","Type":"ContainerDied","Data":"7acd7256521600da6ac1e6813aa3e9cf78cfc3cd65ec85f50d2f45137be61eac"} Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.293954 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4","Type":"ContainerDied","Data":"47fb53ba06afded0b1029056c6f2fa286b164e7b15a7cf592aa056b3e55459a0"} Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.293975 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47fb53ba06afded0b1029056c6f2fa286b164e7b15a7cf592aa056b3e55459a0" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.311011 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.486405 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-scripts\") pod \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.486552 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zzhd\" (UniqueName: \"kubernetes.io/projected/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-kube-api-access-5zzhd\") pod \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.486603 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-log-httpd\") pod \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.486629 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-run-httpd\") pod \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.486672 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-sg-core-conf-yaml\") pod \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.486757 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-config-data\") pod \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.486800 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-combined-ca-bundle\") pod \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\" (UID: \"64dabc01-39ed-4b48-a2d4-4ca7b3070cc4\") " Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.487162 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" (UID: "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.487480 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" (UID: "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.487923 5037 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.488043 5037 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.492156 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-scripts" (OuterVolumeSpecName: "scripts") pod "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" (UID: "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.492994 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-kube-api-access-5zzhd" (OuterVolumeSpecName: "kube-api-access-5zzhd") pod "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" (UID: "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4"). InnerVolumeSpecName "kube-api-access-5zzhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.517031 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" (UID: "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.542622 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" (UID: "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.569682 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-config-data" (OuterVolumeSpecName: "config-data") pod "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" (UID: "64dabc01-39ed-4b48-a2d4-4ca7b3070cc4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.590372 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.590402 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zzhd\" (UniqueName: \"kubernetes.io/projected/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-kube-api-access-5zzhd\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.590414 5037 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.590425 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:56 crc kubenswrapper[5037]: I1126 14:40:56.590434 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.301806 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.398994 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.410094 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.434587 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:40:57 crc kubenswrapper[5037]: E1126 14:40:57.435022 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5272984-1396-4916-b718-6ea94513f800" containerName="init" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.435046 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5272984-1396-4916-b718-6ea94513f800" containerName="init" Nov 26 14:40:57 crc kubenswrapper[5037]: E1126 14:40:57.436462 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5272984-1396-4916-b718-6ea94513f800" containerName="dnsmasq-dns" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.436478 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5272984-1396-4916-b718-6ea94513f800" containerName="dnsmasq-dns" Nov 26 14:40:57 crc kubenswrapper[5037]: E1126 14:40:57.436502 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerName="sg-core" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.436512 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerName="sg-core" Nov 26 14:40:57 crc kubenswrapper[5037]: E1126 14:40:57.436534 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerName="proxy-httpd" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.436542 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerName="proxy-httpd" Nov 26 14:40:57 
crc kubenswrapper[5037]: E1126 14:40:57.436570 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerName="ceilometer-notification-agent" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.436578 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerName="ceilometer-notification-agent" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.436862 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerName="sg-core" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.436889 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerName="ceilometer-notification-agent" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.436904 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5272984-1396-4916-b718-6ea94513f800" containerName="dnsmasq-dns" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.436917 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" containerName="proxy-httpd" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.438763 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.444978 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.445179 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.455234 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.607714 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-config-data\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.607772 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.607829 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8ec55994-5e0e-4f1e-aece-501836b46c63-log-httpd\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.607864 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8ec55994-5e0e-4f1e-aece-501836b46c63-run-httpd\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.607883 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.607927 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wdh4\" (UniqueName: \"kubernetes.io/projected/8ec55994-5e0e-4f1e-aece-501836b46c63-kube-api-access-9wdh4\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.608168 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-scripts\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.709608 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-scripts\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.709979 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-config-data\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.710002 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.710039 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8ec55994-5e0e-4f1e-aece-501836b46c63-log-httpd\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.710061 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8ec55994-5e0e-4f1e-aece-501836b46c63-run-httpd\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.710077 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.710118 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wdh4\" (UniqueName: \"kubernetes.io/projected/8ec55994-5e0e-4f1e-aece-501836b46c63-kube-api-access-9wdh4\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.711878 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8ec55994-5e0e-4f1e-aece-501836b46c63-run-httpd\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.712273 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8ec55994-5e0e-4f1e-aece-501836b46c63-log-httpd\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.714825 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-scripts\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.715921 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-config-data\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.716705 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.725780 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.728096 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wdh4\" (UniqueName: \"kubernetes.io/projected/8ec55994-5e0e-4f1e-aece-501836b46c63-kube-api-access-9wdh4\") pod \"ceilometer-0\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.772465 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:40:57 crc kubenswrapper[5037]: I1126 14:40:57.922487 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64dabc01-39ed-4b48-a2d4-4ca7b3070cc4" path="/var/lib/kubelet/pods/64dabc01-39ed-4b48-a2d4-4ca7b3070cc4/volumes" Nov 26 14:40:58 crc kubenswrapper[5037]: I1126 14:40:58.225264 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:40:58 crc kubenswrapper[5037]: I1126 14:40:58.315086 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8ec55994-5e0e-4f1e-aece-501836b46c63","Type":"ContainerStarted","Data":"ac0bb50783c69d6f012d53c2003400e9b811eb2df950f24d895fc55e4d2f4efa"} Nov 26 14:40:59 crc kubenswrapper[5037]: I1126 14:40:59.324520 5037 generic.go:334] "Generic (PLEG): container finished" podID="50b1873a-43ee-426d-99f2-84e8267cb178" containerID="7479fe1b8683d8b8da52186ee20697aea76b2ba23bac017886f41f497a93218e" exitCode=0 Nov 26 14:40:59 crc kubenswrapper[5037]: I1126 14:40:59.324655 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jxxp2" event={"ID":"50b1873a-43ee-426d-99f2-84e8267cb178","Type":"ContainerDied","Data":"7479fe1b8683d8b8da52186ee20697aea76b2ba23bac017886f41f497a93218e"} Nov 26 14:40:59 crc kubenswrapper[5037]: I1126 14:40:59.327407 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8ec55994-5e0e-4f1e-aece-501836b46c63","Type":"ContainerStarted","Data":"ecfe02c1dc20c77b83de3aad46edd5f2d8609268e30702f511a086bdc4e1820b"} Nov 26 14:40:59 crc kubenswrapper[5037]: I1126 14:40:59.604607 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:40:59 crc kubenswrapper[5037]: I1126 14:40:59.710274 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-798745f775-n9xtc"] Nov 26 14:40:59 crc kubenswrapper[5037]: I1126 14:40:59.710561 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-798745f775-n9xtc" podUID="de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" containerName="dnsmasq-dns" containerID="cri-o://5507a7b9e665ee05018d8b0104c709a284e64f53e42616c429a808983a03f6e0" gracePeriod=10 Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.252881 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.336423 5037 generic.go:334] "Generic (PLEG): container finished" podID="de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" containerID="5507a7b9e665ee05018d8b0104c709a284e64f53e42616c429a808983a03f6e0" exitCode=0 Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.336481 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-798745f775-n9xtc" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.336480 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-798745f775-n9xtc" event={"ID":"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c","Type":"ContainerDied","Data":"5507a7b9e665ee05018d8b0104c709a284e64f53e42616c429a808983a03f6e0"} Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.336545 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-798745f775-n9xtc" event={"ID":"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c","Type":"ContainerDied","Data":"8660e37195c98085384470c07716109c5286cc2e603121279dd8fadb6a194df4"} Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.336566 5037 scope.go:117] "RemoveContainer" containerID="5507a7b9e665ee05018d8b0104c709a284e64f53e42616c429a808983a03f6e0" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.359523 5037 scope.go:117] "RemoveContainer" containerID="6f9c14f89c8dfdcd45902080be403b8011e5c14a7ba8450991d55d4589fc0109" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.366723 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-dns-swift-storage-0\") pod \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.366804 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-ovsdbserver-nb\") pod \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.366844 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-dns-svc\") pod \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.366864 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfz6m\" (UniqueName: \"kubernetes.io/projected/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-kube-api-access-kfz6m\") pod \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.366904 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-ovsdbserver-sb\") pod \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.367391 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-config\") pod \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\" (UID: \"de74d309-5e9e-4cd1-8d1a-c136ac9fa51c\") " Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.385171 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-kube-api-access-kfz6m" (OuterVolumeSpecName: "kube-api-access-kfz6m") pod "de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" (UID: "de74d309-5e9e-4cd1-8d1a-c136ac9fa51c"). 
InnerVolumeSpecName "kube-api-access-kfz6m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.426606 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" (UID: "de74d309-5e9e-4cd1-8d1a-c136ac9fa51c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.431792 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-config" (OuterVolumeSpecName: "config") pod "de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" (UID: "de74d309-5e9e-4cd1-8d1a-c136ac9fa51c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.469971 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.470000 5037 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.470009 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfz6m\" (UniqueName: \"kubernetes.io/projected/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-kube-api-access-kfz6m\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.472692 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" (UID: "de74d309-5e9e-4cd1-8d1a-c136ac9fa51c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.487335 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" (UID: "de74d309-5e9e-4cd1-8d1a-c136ac9fa51c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.490439 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" (UID: "de74d309-5e9e-4cd1-8d1a-c136ac9fa51c"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.515965 5037 scope.go:117] "RemoveContainer" containerID="5507a7b9e665ee05018d8b0104c709a284e64f53e42616c429a808983a03f6e0" Nov 26 14:41:00 crc kubenswrapper[5037]: E1126 14:41:00.521531 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5507a7b9e665ee05018d8b0104c709a284e64f53e42616c429a808983a03f6e0\": container with ID starting with 5507a7b9e665ee05018d8b0104c709a284e64f53e42616c429a808983a03f6e0 not found: ID does not exist" containerID="5507a7b9e665ee05018d8b0104c709a284e64f53e42616c429a808983a03f6e0" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.521582 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5507a7b9e665ee05018d8b0104c709a284e64f53e42616c429a808983a03f6e0"} err="failed to get container status \"5507a7b9e665ee05018d8b0104c709a284e64f53e42616c429a808983a03f6e0\": rpc error: code = NotFound desc = could not find container \"5507a7b9e665ee05018d8b0104c709a284e64f53e42616c429a808983a03f6e0\": container with ID starting with 5507a7b9e665ee05018d8b0104c709a284e64f53e42616c429a808983a03f6e0 not found: ID does not exist" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.521624 5037 scope.go:117] "RemoveContainer" containerID="6f9c14f89c8dfdcd45902080be403b8011e5c14a7ba8450991d55d4589fc0109" Nov 26 14:41:00 crc kubenswrapper[5037]: E1126 14:41:00.543466 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f9c14f89c8dfdcd45902080be403b8011e5c14a7ba8450991d55d4589fc0109\": container with ID starting with 6f9c14f89c8dfdcd45902080be403b8011e5c14a7ba8450991d55d4589fc0109 not found: ID does not exist" containerID="6f9c14f89c8dfdcd45902080be403b8011e5c14a7ba8450991d55d4589fc0109" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.543515 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f9c14f89c8dfdcd45902080be403b8011e5c14a7ba8450991d55d4589fc0109"} err="failed to get container status \"6f9c14f89c8dfdcd45902080be403b8011e5c14a7ba8450991d55d4589fc0109\": rpc error: code = NotFound desc = could not find container \"6f9c14f89c8dfdcd45902080be403b8011e5c14a7ba8450991d55d4589fc0109\": container with ID starting with 6f9c14f89c8dfdcd45902080be403b8011e5c14a7ba8450991d55d4589fc0109 not found: ID does not exist" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.571739 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.571767 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.571776 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.674056 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.674480 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-798745f775-n9xtc"] Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.692699 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-798745f775-n9xtc"] Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.773638 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jt7vt\" (UniqueName: \"kubernetes.io/projected/50b1873a-43ee-426d-99f2-84e8267cb178-kube-api-access-jt7vt\") pod \"50b1873a-43ee-426d-99f2-84e8267cb178\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.773728 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-db-sync-config-data\") pod \"50b1873a-43ee-426d-99f2-84e8267cb178\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.774362 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-scripts\") pod \"50b1873a-43ee-426d-99f2-84e8267cb178\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.774483 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/50b1873a-43ee-426d-99f2-84e8267cb178-etc-machine-id\") pod \"50b1873a-43ee-426d-99f2-84e8267cb178\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.774509 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-combined-ca-bundle\") pod \"50b1873a-43ee-426d-99f2-84e8267cb178\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.774547 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-config-data\") pod \"50b1873a-43ee-426d-99f2-84e8267cb178\" (UID: \"50b1873a-43ee-426d-99f2-84e8267cb178\") " Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.777773 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/50b1873a-43ee-426d-99f2-84e8267cb178-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "50b1873a-43ee-426d-99f2-84e8267cb178" (UID: "50b1873a-43ee-426d-99f2-84e8267cb178"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.777852 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "50b1873a-43ee-426d-99f2-84e8267cb178" (UID: "50b1873a-43ee-426d-99f2-84e8267cb178"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.781425 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-scripts" (OuterVolumeSpecName: "scripts") pod "50b1873a-43ee-426d-99f2-84e8267cb178" (UID: "50b1873a-43ee-426d-99f2-84e8267cb178"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.781458 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50b1873a-43ee-426d-99f2-84e8267cb178-kube-api-access-jt7vt" (OuterVolumeSpecName: "kube-api-access-jt7vt") pod "50b1873a-43ee-426d-99f2-84e8267cb178" (UID: "50b1873a-43ee-426d-99f2-84e8267cb178"). InnerVolumeSpecName "kube-api-access-jt7vt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.806839 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "50b1873a-43ee-426d-99f2-84e8267cb178" (UID: "50b1873a-43ee-426d-99f2-84e8267cb178"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.835391 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-config-data" (OuterVolumeSpecName: "config-data") pod "50b1873a-43ee-426d-99f2-84e8267cb178" (UID: "50b1873a-43ee-426d-99f2-84e8267cb178"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.876126 5037 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/50b1873a-43ee-426d-99f2-84e8267cb178-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.876161 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.876171 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.876183 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jt7vt\" (UniqueName: \"kubernetes.io/projected/50b1873a-43ee-426d-99f2-84e8267cb178-kube-api-access-jt7vt\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.876194 5037 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:00 crc kubenswrapper[5037]: I1126 14:41:00.876202 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50b1873a-43ee-426d-99f2-84e8267cb178-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.361426 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"8ec55994-5e0e-4f1e-aece-501836b46c63","Type":"ContainerStarted","Data":"1cac0a928a182d949345b3bf28a7eb70e03e598482ad70f0156fa523bf850643"} Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.373333 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8ec55994-5e0e-4f1e-aece-501836b46c63","Type":"ContainerStarted","Data":"77e3b705d52ae0fd1eec5a8ec00e21c0da85ce8a8b08e853102e7ce45712d1e1"} Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.398157 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-jxxp2" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.398182 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-jxxp2" event={"ID":"50b1873a-43ee-426d-99f2-84e8267cb178","Type":"ContainerDied","Data":"21dbbaa10727f20e523901b7474c1b27fdf7c8abd4e5a9c9cda87d97bb2eb29c"} Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.398736 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21dbbaa10727f20e523901b7474c1b27fdf7c8abd4e5a9c9cda87d97bb2eb29c" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.574643 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 14:41:01 crc kubenswrapper[5037]: E1126 14:41:01.574999 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" containerName="dnsmasq-dns" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.575018 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" containerName="dnsmasq-dns" Nov 26 14:41:01 crc kubenswrapper[5037]: E1126 14:41:01.575049 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" containerName="init" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.575056 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" containerName="init" Nov 26 14:41:01 crc kubenswrapper[5037]: E1126 14:41:01.575078 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50b1873a-43ee-426d-99f2-84e8267cb178" containerName="cinder-db-sync" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.575086 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="50b1873a-43ee-426d-99f2-84e8267cb178" containerName="cinder-db-sync" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.575250 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="50b1873a-43ee-426d-99f2-84e8267cb178" containerName="cinder-db-sync" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.575268 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" containerName="dnsmasq-dns" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.576235 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.585030 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.585135 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-784g4" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.600122 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.600339 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.616147 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.671583 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-xzpq9"] Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.673430 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.691960 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cs85k\" (UniqueName: \"kubernetes.io/projected/24c45d25-9050-4e29-bffa-d6649cb506b9-kube-api-access-cs85k\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.692052 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-scripts\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.692098 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.692122 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-config-data\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.692141 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/24c45d25-9050-4e29-bffa-d6649cb506b9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.692181 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" 
Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.701627 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-xzpq9"] Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.792470 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.793402 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-ovsdbserver-nb\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.793477 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-scripts\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.793512 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-config\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.793563 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.793597 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-config-data\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.793619 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/24c45d25-9050-4e29-bffa-d6649cb506b9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.793669 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.793742 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-dns-svc\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.793787 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-dns-swift-storage-0\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.793821 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-ovsdbserver-sb\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.793877 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwlch\" (UniqueName: \"kubernetes.io/projected/8678a3ca-f406-4732-8478-56f5ea2f6174-kube-api-access-cwlch\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.793937 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cs85k\" (UniqueName: \"kubernetes.io/projected/24c45d25-9050-4e29-bffa-d6649cb506b9-kube-api-access-cs85k\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.793949 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.798309 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.798422 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/24c45d25-9050-4e29-bffa-d6649cb506b9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.798721 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.803571 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-config-data\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.805666 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-scripts\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.806567 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" 
Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.820725 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.834877 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cs85k\" (UniqueName: \"kubernetes.io/projected/24c45d25-9050-4e29-bffa-d6649cb506b9-kube-api-access-cs85k\") pod \"cinder-scheduler-0\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.895887 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-ovsdbserver-nb\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.895942 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-config-data-custom\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.895974 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-config\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.896016 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7748e83-dd92-4a18-acc5-c1f9410f0710-logs\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.896063 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-config-data\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.896154 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-dns-svc\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.896190 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-dns-swift-storage-0\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.896210 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-ovsdbserver-sb\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" 
Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.896235 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7748e83-dd92-4a18-acc5-c1f9410f0710-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.896255 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwlch\" (UniqueName: \"kubernetes.io/projected/8678a3ca-f406-4732-8478-56f5ea2f6174-kube-api-access-cwlch\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.896278 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.896315 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2gd9\" (UniqueName: \"kubernetes.io/projected/c7748e83-dd92-4a18-acc5-c1f9410f0710-kube-api-access-d2gd9\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.896334 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-scripts\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.896847 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-ovsdbserver-nb\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.896891 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-config\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.897141 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-dns-svc\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.897424 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-dns-swift-storage-0\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.897794 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-ovsdbserver-sb\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.908320 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.918183 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de74d309-5e9e-4cd1-8d1a-c136ac9fa51c" path="/var/lib/kubelet/pods/de74d309-5e9e-4cd1-8d1a-c136ac9fa51c/volumes" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.922148 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwlch\" (UniqueName: \"kubernetes.io/projected/8678a3ca-f406-4732-8478-56f5ea2f6174-kube-api-access-cwlch\") pod \"dnsmasq-dns-7965876c4f-xzpq9\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:01 crc kubenswrapper[5037]: I1126 14:41:01.956004 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.004574 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.005126 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7748e83-dd92-4a18-acc5-c1f9410f0710-logs\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.005200 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-config-data\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.005245 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7748e83-dd92-4a18-acc5-c1f9410f0710-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.005302 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.005328 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2gd9\" (UniqueName: \"kubernetes.io/projected/c7748e83-dd92-4a18-acc5-c1f9410f0710-kube-api-access-d2gd9\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.005346 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-scripts\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 
14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.005401 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-config-data-custom\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.005530 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7748e83-dd92-4a18-acc5-c1f9410f0710-logs\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.007524 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7748e83-dd92-4a18-acc5-c1f9410f0710-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.009917 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.010017 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-config-data-custom\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.013802 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-config-data\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.016032 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-scripts\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.027847 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2gd9\" (UniqueName: \"kubernetes.io/projected/c7748e83-dd92-4a18-acc5-c1f9410f0710-kube-api-access-d2gd9\") pod \"cinder-api-0\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.120482 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.176730 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-84d74cb456-scxkz"] Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.177207 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-84d74cb456-scxkz" podUID="9040dbae-017f-4cc9-98c0-6b0228cfa220" containerName="barbican-api-log" containerID="cri-o://df4a5b138e39379e1becb18718f2bb4ed479d3c758ce519599b163cabe3455f8" gracePeriod=30 Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.178847 
5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-84d74cb456-scxkz" podUID="9040dbae-017f-4cc9-98c0-6b0228cfa220" containerName="barbican-api" containerID="cri-o://a46c24c1c373c3d41cfd0b561ce04fa933d6fc18ae7e8258658363f0d60384c5" gracePeriod=30 Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.207019 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.382548 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 14:41:02 crc kubenswrapper[5037]: W1126 14:41:02.388244 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod24c45d25_9050_4e29_bffa_d6649cb506b9.slice/crio-05e09690be6f9ddd53946bab3ea2b361511b04a4eecc3d3b1c80250f8b18f65a WatchSource:0}: Error finding container 05e09690be6f9ddd53946bab3ea2b361511b04a4eecc3d3b1c80250f8b18f65a: Status 404 returned error can't find the container with id 05e09690be6f9ddd53946bab3ea2b361511b04a4eecc3d3b1c80250f8b18f65a Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.431844 5037 generic.go:334] "Generic (PLEG): container finished" podID="9040dbae-017f-4cc9-98c0-6b0228cfa220" containerID="df4a5b138e39379e1becb18718f2bb4ed479d3c758ce519599b163cabe3455f8" exitCode=143 Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.431921 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84d74cb456-scxkz" event={"ID":"9040dbae-017f-4cc9-98c0-6b0228cfa220","Type":"ContainerDied","Data":"df4a5b138e39379e1becb18718f2bb4ed479d3c758ce519599b163cabe3455f8"} Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.444394 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"24c45d25-9050-4e29-bffa-d6649cb506b9","Type":"ContainerStarted","Data":"05e09690be6f9ddd53946bab3ea2b361511b04a4eecc3d3b1c80250f8b18f65a"} Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.516311 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-xzpq9"] Nov 26 14:41:02 crc kubenswrapper[5037]: I1126 14:41:02.741449 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 14:41:02 crc kubenswrapper[5037]: W1126 14:41:02.750311 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc7748e83_dd92_4a18_acc5_c1f9410f0710.slice/crio-04b0bee7db51afc50a1912b3e1928667a0b82205498ffacbe1f66a5a0c7433ff WatchSource:0}: Error finding container 04b0bee7db51afc50a1912b3e1928667a0b82205498ffacbe1f66a5a0c7433ff: Status 404 returned error can't find the container with id 04b0bee7db51afc50a1912b3e1928667a0b82205498ffacbe1f66a5a0c7433ff Nov 26 14:41:03 crc kubenswrapper[5037]: I1126 14:41:03.451942 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c7748e83-dd92-4a18-acc5-c1f9410f0710","Type":"ContainerStarted","Data":"04b0bee7db51afc50a1912b3e1928667a0b82205498ffacbe1f66a5a0c7433ff"} Nov 26 14:41:03 crc kubenswrapper[5037]: I1126 14:41:03.454138 5037 generic.go:334] "Generic (PLEG): container finished" podID="8678a3ca-f406-4732-8478-56f5ea2f6174" containerID="86e778099b0962ae8384f2c06e358ab70c59fdf998892ac5a48d9b9fc91dac8e" exitCode=0 Nov 26 14:41:03 crc kubenswrapper[5037]: I1126 14:41:03.454196 5037 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" event={"ID":"8678a3ca-f406-4732-8478-56f5ea2f6174","Type":"ContainerDied","Data":"86e778099b0962ae8384f2c06e358ab70c59fdf998892ac5a48d9b9fc91dac8e"} Nov 26 14:41:03 crc kubenswrapper[5037]: I1126 14:41:03.454260 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" event={"ID":"8678a3ca-f406-4732-8478-56f5ea2f6174","Type":"ContainerStarted","Data":"2e92351b867ee750c38ddac15e82a58d4ac748f02eb6dd93626c4eb49a639b6c"} Nov 26 14:41:04 crc kubenswrapper[5037]: I1126 14:41:04.199632 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 14:41:04 crc kubenswrapper[5037]: I1126 14:41:04.465848 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8ec55994-5e0e-4f1e-aece-501836b46c63","Type":"ContainerStarted","Data":"a636eb1c52ba5fb1a646ed001892d91916061ddd778605fd9b8ccd3a6c44f12c"} Nov 26 14:41:04 crc kubenswrapper[5037]: I1126 14:41:04.466315 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 14:41:04 crc kubenswrapper[5037]: I1126 14:41:04.469750 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" event={"ID":"8678a3ca-f406-4732-8478-56f5ea2f6174","Type":"ContainerStarted","Data":"1132189092b14ee6c21db91aac9b539c44a1f52e5371431b948a4a1b8b523cd1"} Nov 26 14:41:04 crc kubenswrapper[5037]: I1126 14:41:04.469891 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:04 crc kubenswrapper[5037]: I1126 14:41:04.471350 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c7748e83-dd92-4a18-acc5-c1f9410f0710","Type":"ContainerStarted","Data":"fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3"} Nov 26 14:41:04 crc kubenswrapper[5037]: I1126 14:41:04.493200 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.422588089 podStartE2EDuration="7.493180932s" podCreationTimestamp="2025-11-26 14:40:57 +0000 UTC" firstStartedPulling="2025-11-26 14:40:58.238564599 +0000 UTC m=+1525.035334823" lastFinishedPulling="2025-11-26 14:41:03.309157482 +0000 UTC m=+1530.105927666" observedRunningTime="2025-11-26 14:41:04.484935121 +0000 UTC m=+1531.281705305" watchObservedRunningTime="2025-11-26 14:41:04.493180932 +0000 UTC m=+1531.289951106" Nov 26 14:41:04 crc kubenswrapper[5037]: I1126 14:41:04.508771 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" podStartSLOduration=3.508754772 podStartE2EDuration="3.508754772s" podCreationTimestamp="2025-11-26 14:41:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:41:04.502197233 +0000 UTC m=+1531.298967437" watchObservedRunningTime="2025-11-26 14:41:04.508754772 +0000 UTC m=+1531.305524946" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.483903 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c7748e83-dd92-4a18-acc5-c1f9410f0710","Type":"ContainerStarted","Data":"7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f"} Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.484372 5037 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/cinder-api-0" podUID="c7748e83-dd92-4a18-acc5-c1f9410f0710" containerName="cinder-api-log" containerID="cri-o://fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3" gracePeriod=30 Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.484655 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.484931 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="c7748e83-dd92-4a18-acc5-c1f9410f0710" containerName="cinder-api" containerID="cri-o://7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f" gracePeriod=30 Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.488372 5037 generic.go:334] "Generic (PLEG): container finished" podID="9040dbae-017f-4cc9-98c0-6b0228cfa220" containerID="a46c24c1c373c3d41cfd0b561ce04fa933d6fc18ae7e8258658363f0d60384c5" exitCode=0 Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.489144 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84d74cb456-scxkz" event={"ID":"9040dbae-017f-4cc9-98c0-6b0228cfa220","Type":"ContainerDied","Data":"a46c24c1c373c3d41cfd0b561ce04fa933d6fc18ae7e8258658363f0d60384c5"} Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.513336 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.513318452 podStartE2EDuration="4.513318452s" podCreationTimestamp="2025-11-26 14:41:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:41:05.505363568 +0000 UTC m=+1532.302133762" watchObservedRunningTime="2025-11-26 14:41:05.513318452 +0000 UTC m=+1532.310088636" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.786831 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.882141 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-combined-ca-bundle\") pod \"9040dbae-017f-4cc9-98c0-6b0228cfa220\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.882210 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-config-data\") pod \"9040dbae-017f-4cc9-98c0-6b0228cfa220\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.882728 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9040dbae-017f-4cc9-98c0-6b0228cfa220-logs\") pod \"9040dbae-017f-4cc9-98c0-6b0228cfa220\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.883485 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-config-data-custom\") pod \"9040dbae-017f-4cc9-98c0-6b0228cfa220\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.883521 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vpn6r\" (UniqueName: \"kubernetes.io/projected/9040dbae-017f-4cc9-98c0-6b0228cfa220-kube-api-access-vpn6r\") pod \"9040dbae-017f-4cc9-98c0-6b0228cfa220\" (UID: \"9040dbae-017f-4cc9-98c0-6b0228cfa220\") " Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.884761 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9040dbae-017f-4cc9-98c0-6b0228cfa220-logs" (OuterVolumeSpecName: "logs") pod "9040dbae-017f-4cc9-98c0-6b0228cfa220" (UID: "9040dbae-017f-4cc9-98c0-6b0228cfa220"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.890164 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9040dbae-017f-4cc9-98c0-6b0228cfa220-kube-api-access-vpn6r" (OuterVolumeSpecName: "kube-api-access-vpn6r") pod "9040dbae-017f-4cc9-98c0-6b0228cfa220" (UID: "9040dbae-017f-4cc9-98c0-6b0228cfa220"). InnerVolumeSpecName "kube-api-access-vpn6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.896135 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9040dbae-017f-4cc9-98c0-6b0228cfa220" (UID: "9040dbae-017f-4cc9-98c0-6b0228cfa220"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.908625 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:41:05 crc kubenswrapper[5037]: E1126 14:41:05.908998 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.918540 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9040dbae-017f-4cc9-98c0-6b0228cfa220" (UID: "9040dbae-017f-4cc9-98c0-6b0228cfa220"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.953868 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-config-data" (OuterVolumeSpecName: "config-data") pod "9040dbae-017f-4cc9-98c0-6b0228cfa220" (UID: "9040dbae-017f-4cc9-98c0-6b0228cfa220"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.988980 5037 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.989022 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vpn6r\" (UniqueName: \"kubernetes.io/projected/9040dbae-017f-4cc9-98c0-6b0228cfa220-kube-api-access-vpn6r\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.989036 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.989049 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9040dbae-017f-4cc9-98c0-6b0228cfa220-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:05 crc kubenswrapper[5037]: I1126 14:41:05.989060 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9040dbae-017f-4cc9-98c0-6b0228cfa220-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.137997 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.192145 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2gd9\" (UniqueName: \"kubernetes.io/projected/c7748e83-dd92-4a18-acc5-c1f9410f0710-kube-api-access-d2gd9\") pod \"c7748e83-dd92-4a18-acc5-c1f9410f0710\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.192573 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7748e83-dd92-4a18-acc5-c1f9410f0710-logs\") pod \"c7748e83-dd92-4a18-acc5-c1f9410f0710\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.192632 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-config-data\") pod \"c7748e83-dd92-4a18-acc5-c1f9410f0710\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.192648 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-combined-ca-bundle\") pod \"c7748e83-dd92-4a18-acc5-c1f9410f0710\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.192744 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7748e83-dd92-4a18-acc5-c1f9410f0710-etc-machine-id\") pod \"c7748e83-dd92-4a18-acc5-c1f9410f0710\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.192772 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-config-data-custom\") pod \"c7748e83-dd92-4a18-acc5-c1f9410f0710\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.192798 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-scripts\") pod \"c7748e83-dd92-4a18-acc5-c1f9410f0710\" (UID: \"c7748e83-dd92-4a18-acc5-c1f9410f0710\") " Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.192831 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c7748e83-dd92-4a18-acc5-c1f9410f0710-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c7748e83-dd92-4a18-acc5-c1f9410f0710" (UID: "c7748e83-dd92-4a18-acc5-c1f9410f0710"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.192979 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7748e83-dd92-4a18-acc5-c1f9410f0710-logs" (OuterVolumeSpecName: "logs") pod "c7748e83-dd92-4a18-acc5-c1f9410f0710" (UID: "c7748e83-dd92-4a18-acc5-c1f9410f0710"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.193527 5037 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7748e83-dd92-4a18-acc5-c1f9410f0710-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.193552 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7748e83-dd92-4a18-acc5-c1f9410f0710-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.198451 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c7748e83-dd92-4a18-acc5-c1f9410f0710" (UID: "c7748e83-dd92-4a18-acc5-c1f9410f0710"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.199694 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7748e83-dd92-4a18-acc5-c1f9410f0710-kube-api-access-d2gd9" (OuterVolumeSpecName: "kube-api-access-d2gd9") pod "c7748e83-dd92-4a18-acc5-c1f9410f0710" (UID: "c7748e83-dd92-4a18-acc5-c1f9410f0710"). InnerVolumeSpecName "kube-api-access-d2gd9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.201056 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-scripts" (OuterVolumeSpecName: "scripts") pod "c7748e83-dd92-4a18-acc5-c1f9410f0710" (UID: "c7748e83-dd92-4a18-acc5-c1f9410f0710"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.223882 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c7748e83-dd92-4a18-acc5-c1f9410f0710" (UID: "c7748e83-dd92-4a18-acc5-c1f9410f0710"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.243219 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-config-data" (OuterVolumeSpecName: "config-data") pod "c7748e83-dd92-4a18-acc5-c1f9410f0710" (UID: "c7748e83-dd92-4a18-acc5-c1f9410f0710"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.295361 5037 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.295394 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.295403 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2gd9\" (UniqueName: \"kubernetes.io/projected/c7748e83-dd92-4a18-acc5-c1f9410f0710-kube-api-access-d2gd9\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.295414 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.295424 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7748e83-dd92-4a18-acc5-c1f9410f0710-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.524992 5037 generic.go:334] "Generic (PLEG): container finished" podID="c7748e83-dd92-4a18-acc5-c1f9410f0710" containerID="7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f" exitCode=0 Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.525044 5037 generic.go:334] "Generic (PLEG): container finished" podID="c7748e83-dd92-4a18-acc5-c1f9410f0710" containerID="fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3" exitCode=143 Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.525128 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.525153 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c7748e83-dd92-4a18-acc5-c1f9410f0710","Type":"ContainerDied","Data":"7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f"} Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.525368 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c7748e83-dd92-4a18-acc5-c1f9410f0710","Type":"ContainerDied","Data":"fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3"} Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.525411 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c7748e83-dd92-4a18-acc5-c1f9410f0710","Type":"ContainerDied","Data":"04b0bee7db51afc50a1912b3e1928667a0b82205498ffacbe1f66a5a0c7433ff"} Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.525451 5037 scope.go:117] "RemoveContainer" containerID="7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.529117 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84d74cb456-scxkz" event={"ID":"9040dbae-017f-4cc9-98c0-6b0228cfa220","Type":"ContainerDied","Data":"216435e2783b1861caaef88c957ae844c4019d4fbd2ce389b4f2288680f02775"} Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.529250 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-84d74cb456-scxkz" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.536939 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"24c45d25-9050-4e29-bffa-d6649cb506b9","Type":"ContainerStarted","Data":"b5a76dab63bf1288a33999b8f62cf1b107ff7b2d0ed7cd81e704547774580946"} Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.536987 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"24c45d25-9050-4e29-bffa-d6649cb506b9","Type":"ContainerStarted","Data":"a43008a09a3271c6cfb957852cebfe0149f1828f3d55dc6ae8ebbf1fb7af0f82"} Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.571863 5037 scope.go:117] "RemoveContainer" containerID="fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.583277 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.014022597 podStartE2EDuration="5.583254007s" podCreationTimestamp="2025-11-26 14:41:01 +0000 UTC" firstStartedPulling="2025-11-26 14:41:02.39585264 +0000 UTC m=+1529.192622824" lastFinishedPulling="2025-11-26 14:41:04.96508405 +0000 UTC m=+1531.761854234" observedRunningTime="2025-11-26 14:41:06.576571314 +0000 UTC m=+1533.373341518" watchObservedRunningTime="2025-11-26 14:41:06.583254007 +0000 UTC m=+1533.380024211" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.615092 5037 scope.go:117] "RemoveContainer" containerID="7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f" Nov 26 14:41:06 crc kubenswrapper[5037]: E1126 14:41:06.616646 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f\": container with ID starting with 
7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f not found: ID does not exist" containerID="7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.616728 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f"} err="failed to get container status \"7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f\": rpc error: code = NotFound desc = could not find container \"7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f\": container with ID starting with 7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f not found: ID does not exist" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.616795 5037 scope.go:117] "RemoveContainer" containerID="fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3" Nov 26 14:41:06 crc kubenswrapper[5037]: E1126 14:41:06.617518 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3\": container with ID starting with fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3 not found: ID does not exist" containerID="fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.617556 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3"} err="failed to get container status \"fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3\": rpc error: code = NotFound desc = could not find container \"fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3\": container with ID starting with fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3 not found: ID does not exist" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.617597 5037 scope.go:117] "RemoveContainer" containerID="7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.618910 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f"} err="failed to get container status \"7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f\": rpc error: code = NotFound desc = could not find container \"7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f\": container with ID starting with 7ac2667b491dabca84624b0d9aceba7271e36d89c697653bf8b7d838b024a93f not found: ID does not exist" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.618936 5037 scope.go:117] "RemoveContainer" containerID="fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.619149 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3"} err="failed to get container status \"fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3\": rpc error: code = NotFound desc = could not find container \"fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3\": container with ID starting with fdf12f109a43aab8d292e0e9d7b7f874edd24165d44c4a41155272bee54d05a3 not found: ID does not exist" Nov 26 
14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.619169 5037 scope.go:117] "RemoveContainer" containerID="a46c24c1c373c3d41cfd0b561ce04fa933d6fc18ae7e8258658363f0d60384c5" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.621447 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-84d74cb456-scxkz"] Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.650185 5037 scope.go:117] "RemoveContainer" containerID="df4a5b138e39379e1becb18718f2bb4ed479d3c758ce519599b163cabe3455f8" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.653405 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-84d74cb456-scxkz"] Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.675975 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.684800 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.699373 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 14:41:06 crc kubenswrapper[5037]: E1126 14:41:06.699722 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7748e83-dd92-4a18-acc5-c1f9410f0710" containerName="cinder-api-log" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.699738 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7748e83-dd92-4a18-acc5-c1f9410f0710" containerName="cinder-api-log" Nov 26 14:41:06 crc kubenswrapper[5037]: E1126 14:41:06.699747 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7748e83-dd92-4a18-acc5-c1f9410f0710" containerName="cinder-api" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.699754 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7748e83-dd92-4a18-acc5-c1f9410f0710" containerName="cinder-api" Nov 26 14:41:06 crc kubenswrapper[5037]: E1126 14:41:06.699770 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9040dbae-017f-4cc9-98c0-6b0228cfa220" containerName="barbican-api-log" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.699776 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9040dbae-017f-4cc9-98c0-6b0228cfa220" containerName="barbican-api-log" Nov 26 14:41:06 crc kubenswrapper[5037]: E1126 14:41:06.699802 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9040dbae-017f-4cc9-98c0-6b0228cfa220" containerName="barbican-api" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.699809 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9040dbae-017f-4cc9-98c0-6b0228cfa220" containerName="barbican-api" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.699964 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="9040dbae-017f-4cc9-98c0-6b0228cfa220" containerName="barbican-api" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.699980 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7748e83-dd92-4a18-acc5-c1f9410f0710" containerName="cinder-api-log" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.699992 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7748e83-dd92-4a18-acc5-c1f9410f0710" containerName="cinder-api" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.700003 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="9040dbae-017f-4cc9-98c0-6b0228cfa220" containerName="barbican-api-log" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 
14:41:06.701466 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.703772 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.704026 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.704131 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.715466 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.806443 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-config-data\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.806531 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.806575 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqjj6\" (UniqueName: \"kubernetes.io/projected/07720f90-b6f7-4b81-9c32-17f1e72b19fa-kube-api-access-qqjj6\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.806713 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-public-tls-certs\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.806777 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-config-data-custom\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.806801 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07720f90-b6f7-4b81-9c32-17f1e72b19fa-etc-machine-id\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.806817 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07720f90-b6f7-4b81-9c32-17f1e72b19fa-logs\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.806835 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-scripts\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.807039 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.908848 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.909244 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqjj6\" (UniqueName: \"kubernetes.io/projected/07720f90-b6f7-4b81-9c32-17f1e72b19fa-kube-api-access-qqjj6\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.909201 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.909681 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-public-tls-certs\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.909906 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-config-data-custom\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.910024 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07720f90-b6f7-4b81-9c32-17f1e72b19fa-etc-machine-id\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.910125 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07720f90-b6f7-4b81-9c32-17f1e72b19fa-logs\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.910374 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-scripts\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.910530 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-internal-tls-certs\") pod \"cinder-api-0\" (UID: 
\"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.910689 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-config-data\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.910181 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07720f90-b6f7-4b81-9c32-17f1e72b19fa-etc-machine-id\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.911054 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07720f90-b6f7-4b81-9c32-17f1e72b19fa-logs\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.914322 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.915721 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-scripts\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.917984 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.918130 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-config-data\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.918343 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-public-tls-certs\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.920548 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-config-data-custom\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 14:41:06 crc kubenswrapper[5037]: I1126 14:41:06.938856 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqjj6\" (UniqueName: \"kubernetes.io/projected/07720f90-b6f7-4b81-9c32-17f1e72b19fa-kube-api-access-qqjj6\") pod \"cinder-api-0\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " pod="openstack/cinder-api-0" Nov 26 
14:41:07 crc kubenswrapper[5037]: I1126 14:41:07.025803 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 14:41:07 crc kubenswrapper[5037]: W1126 14:41:07.490152 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07720f90_b6f7_4b81_9c32_17f1e72b19fa.slice/crio-201bbd8e2fb4fdbe2c467d4eec9082b94959a51d56c3d72326b1e23bd67261c2 WatchSource:0}: Error finding container 201bbd8e2fb4fdbe2c467d4eec9082b94959a51d56c3d72326b1e23bd67261c2: Status 404 returned error can't find the container with id 201bbd8e2fb4fdbe2c467d4eec9082b94959a51d56c3d72326b1e23bd67261c2 Nov 26 14:41:07 crc kubenswrapper[5037]: I1126 14:41:07.497878 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 14:41:07 crc kubenswrapper[5037]: I1126 14:41:07.573798 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"07720f90-b6f7-4b81-9c32-17f1e72b19fa","Type":"ContainerStarted","Data":"201bbd8e2fb4fdbe2c467d4eec9082b94959a51d56c3d72326b1e23bd67261c2"} Nov 26 14:41:07 crc kubenswrapper[5037]: I1126 14:41:07.923641 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9040dbae-017f-4cc9-98c0-6b0228cfa220" path="/var/lib/kubelet/pods/9040dbae-017f-4cc9-98c0-6b0228cfa220/volumes" Nov 26 14:41:07 crc kubenswrapper[5037]: I1126 14:41:07.924237 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7748e83-dd92-4a18-acc5-c1f9410f0710" path="/var/lib/kubelet/pods/c7748e83-dd92-4a18-acc5-c1f9410f0710/volumes" Nov 26 14:41:08 crc kubenswrapper[5037]: I1126 14:41:08.584124 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"07720f90-b6f7-4b81-9c32-17f1e72b19fa","Type":"ContainerStarted","Data":"1ccf73ea43e62a2d000418194aef023e26ee721280485b1329df2b411c630259"} Nov 26 14:41:09 crc kubenswrapper[5037]: I1126 14:41:09.613763 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"07720f90-b6f7-4b81-9c32-17f1e72b19fa","Type":"ContainerStarted","Data":"6c728b7a4bd6db17ff62032233cd9d220168f2c76bace60a7590b7b669f9d433"} Nov 26 14:41:09 crc kubenswrapper[5037]: I1126 14:41:09.614242 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 14:41:09 crc kubenswrapper[5037]: I1126 14:41:09.661593 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.661555062 podStartE2EDuration="3.661555062s" podCreationTimestamp="2025-11-26 14:41:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:41:09.648126975 +0000 UTC m=+1536.444897219" watchObservedRunningTime="2025-11-26 14:41:09.661555062 +0000 UTC m=+1536.458325296" Nov 26 14:41:11 crc kubenswrapper[5037]: I1126 14:41:11.871004 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:41:11 crc kubenswrapper[5037]: I1126 14:41:11.882812 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-854dc8db7d-j5l6c" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.006632 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 
14:41:12.082719 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66b66f7449-2q2xk"] Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.083154 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" podUID="698376ab-e89f-4577-9bee-c562e82b32ba" containerName="dnsmasq-dns" containerID="cri-o://78b901dad012794cdf465b35ea24402937fd90a8cdaac5e8afdb3f90cb70b5cb" gracePeriod=10 Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.226901 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.238279 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.319603 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.642023 5037 generic.go:334] "Generic (PLEG): container finished" podID="698376ab-e89f-4577-9bee-c562e82b32ba" containerID="78b901dad012794cdf465b35ea24402937fd90a8cdaac5e8afdb3f90cb70b5cb" exitCode=0 Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.642121 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" event={"ID":"698376ab-e89f-4577-9bee-c562e82b32ba","Type":"ContainerDied","Data":"78b901dad012794cdf465b35ea24402937fd90a8cdaac5e8afdb3f90cb70b5cb"} Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.642411 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" event={"ID":"698376ab-e89f-4577-9bee-c562e82b32ba","Type":"ContainerDied","Data":"ad8fd0a94ef2d905262b31754016f67d4907aa8555e998ed37bbd8c6b1746f2e"} Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.642436 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad8fd0a94ef2d905262b31754016f67d4907aa8555e998ed37bbd8c6b1746f2e" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.642530 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="24c45d25-9050-4e29-bffa-d6649cb506b9" containerName="cinder-scheduler" containerID="cri-o://a43008a09a3271c6cfb957852cebfe0149f1828f3d55dc6ae8ebbf1fb7af0f82" gracePeriod=30 Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.642583 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="24c45d25-9050-4e29-bffa-d6649cb506b9" containerName="probe" containerID="cri-o://b5a76dab63bf1288a33999b8f62cf1b107ff7b2d0ed7cd81e704547774580946" gracePeriod=30 Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.656976 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.727930 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-ovsdbserver-sb\") pod \"698376ab-e89f-4577-9bee-c562e82b32ba\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.728157 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-config\") pod \"698376ab-e89f-4577-9bee-c562e82b32ba\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.728319 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5bmh\" (UniqueName: \"kubernetes.io/projected/698376ab-e89f-4577-9bee-c562e82b32ba-kube-api-access-f5bmh\") pod \"698376ab-e89f-4577-9bee-c562e82b32ba\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.728413 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-ovsdbserver-nb\") pod \"698376ab-e89f-4577-9bee-c562e82b32ba\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.728546 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-dns-svc\") pod \"698376ab-e89f-4577-9bee-c562e82b32ba\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.728659 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-dns-swift-storage-0\") pod \"698376ab-e89f-4577-9bee-c562e82b32ba\" (UID: \"698376ab-e89f-4577-9bee-c562e82b32ba\") " Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.734266 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/698376ab-e89f-4577-9bee-c562e82b32ba-kube-api-access-f5bmh" (OuterVolumeSpecName: "kube-api-access-f5bmh") pod "698376ab-e89f-4577-9bee-c562e82b32ba" (UID: "698376ab-e89f-4577-9bee-c562e82b32ba"). InnerVolumeSpecName "kube-api-access-f5bmh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.780935 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "698376ab-e89f-4577-9bee-c562e82b32ba" (UID: "698376ab-e89f-4577-9bee-c562e82b32ba"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.785825 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-config" (OuterVolumeSpecName: "config") pod "698376ab-e89f-4577-9bee-c562e82b32ba" (UID: "698376ab-e89f-4577-9bee-c562e82b32ba"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.795124 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "698376ab-e89f-4577-9bee-c562e82b32ba" (UID: "698376ab-e89f-4577-9bee-c562e82b32ba"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.806057 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "698376ab-e89f-4577-9bee-c562e82b32ba" (UID: "698376ab-e89f-4577-9bee-c562e82b32ba"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.814412 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "698376ab-e89f-4577-9bee-c562e82b32ba" (UID: "698376ab-e89f-4577-9bee-c562e82b32ba"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.831621 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.831966 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.832096 5037 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.832183 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.832454 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/698376ab-e89f-4577-9bee-c562e82b32ba-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:12 crc kubenswrapper[5037]: I1126 14:41:12.832565 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5bmh\" (UniqueName: \"kubernetes.io/projected/698376ab-e89f-4577-9bee-c562e82b32ba-kube-api-access-f5bmh\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:13 crc kubenswrapper[5037]: I1126 14:41:13.653724 5037 generic.go:334] "Generic (PLEG): container finished" podID="24c45d25-9050-4e29-bffa-d6649cb506b9" containerID="b5a76dab63bf1288a33999b8f62cf1b107ff7b2d0ed7cd81e704547774580946" exitCode=0 Nov 26 14:41:13 crc kubenswrapper[5037]: I1126 14:41:13.653790 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"24c45d25-9050-4e29-bffa-d6649cb506b9","Type":"ContainerDied","Data":"b5a76dab63bf1288a33999b8f62cf1b107ff7b2d0ed7cd81e704547774580946"} Nov 26 14:41:13 
crc kubenswrapper[5037]: I1126 14:41:13.654147 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66b66f7449-2q2xk" Nov 26 14:41:13 crc kubenswrapper[5037]: I1126 14:41:13.690634 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66b66f7449-2q2xk"] Nov 26 14:41:13 crc kubenswrapper[5037]: I1126 14:41:13.701744 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-66b66f7449-2q2xk"] Nov 26 14:41:13 crc kubenswrapper[5037]: I1126 14:41:13.919197 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="698376ab-e89f-4577-9bee-c562e82b32ba" path="/var/lib/kubelet/pods/698376ab-e89f-4577-9bee-c562e82b32ba/volumes" Nov 26 14:41:14 crc kubenswrapper[5037]: I1126 14:41:14.641665 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.111168 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 26 14:41:15 crc kubenswrapper[5037]: E1126 14:41:15.111565 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="698376ab-e89f-4577-9bee-c562e82b32ba" containerName="init" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.111577 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="698376ab-e89f-4577-9bee-c562e82b32ba" containerName="init" Nov 26 14:41:15 crc kubenswrapper[5037]: E1126 14:41:15.111606 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="698376ab-e89f-4577-9bee-c562e82b32ba" containerName="dnsmasq-dns" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.111612 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="698376ab-e89f-4577-9bee-c562e82b32ba" containerName="dnsmasq-dns" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.111824 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="698376ab-e89f-4577-9bee-c562e82b32ba" containerName="dnsmasq-dns" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.112393 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.115074 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.115192 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-gd8bs" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.116573 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.121365 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.191266 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.191340 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-openstack-config-secret\") pod \"openstackclient\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.191411 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-openstack-config\") pod \"openstackclient\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.191481 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2z7vp\" (UniqueName: \"kubernetes.io/projected/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-kube-api-access-2z7vp\") pod \"openstackclient\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.292898 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.292948 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-openstack-config-secret\") pod \"openstackclient\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.292974 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-openstack-config\") pod \"openstackclient\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.293029 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-2z7vp\" (UniqueName: \"kubernetes.io/projected/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-kube-api-access-2z7vp\") pod \"openstackclient\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.294157 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-openstack-config\") pod \"openstackclient\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.298247 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.309343 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-openstack-config-secret\") pod \"openstackclient\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.314690 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2z7vp\" (UniqueName: \"kubernetes.io/projected/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-kube-api-access-2z7vp\") pod \"openstackclient\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.430685 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 14:41:15 crc kubenswrapper[5037]: W1126 14:41:15.921425 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7fcafe6_2e7b_4893_84bc_5a3be7029ef7.slice/crio-6e9e417431ae79b05cac64353dc05afb86710478a14a06f054d0d800753d6b76 WatchSource:0}: Error finding container 6e9e417431ae79b05cac64353dc05afb86710478a14a06f054d0d800753d6b76: Status 404 returned error can't find the container with id 6e9e417431ae79b05cac64353dc05afb86710478a14a06f054d0d800753d6b76 Nov 26 14:41:15 crc kubenswrapper[5037]: I1126 14:41:15.923440 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 14:41:16 crc kubenswrapper[5037]: I1126 14:41:16.730457 5037 generic.go:334] "Generic (PLEG): container finished" podID="24c45d25-9050-4e29-bffa-d6649cb506b9" containerID="a43008a09a3271c6cfb957852cebfe0149f1828f3d55dc6ae8ebbf1fb7af0f82" exitCode=0 Nov 26 14:41:16 crc kubenswrapper[5037]: I1126 14:41:16.730805 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"24c45d25-9050-4e29-bffa-d6649cb506b9","Type":"ContainerDied","Data":"a43008a09a3271c6cfb957852cebfe0149f1828f3d55dc6ae8ebbf1fb7af0f82"} Nov 26 14:41:16 crc kubenswrapper[5037]: I1126 14:41:16.744432 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7","Type":"ContainerStarted","Data":"6e9e417431ae79b05cac64353dc05afb86710478a14a06f054d0d800753d6b76"} Nov 26 14:41:16 crc kubenswrapper[5037]: I1126 14:41:16.879735 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.036983 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-config-data-custom\") pod \"24c45d25-9050-4e29-bffa-d6649cb506b9\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.037079 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-scripts\") pod \"24c45d25-9050-4e29-bffa-d6649cb506b9\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.037184 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/24c45d25-9050-4e29-bffa-d6649cb506b9-etc-machine-id\") pod \"24c45d25-9050-4e29-bffa-d6649cb506b9\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.037335 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24c45d25-9050-4e29-bffa-d6649cb506b9-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "24c45d25-9050-4e29-bffa-d6649cb506b9" (UID: "24c45d25-9050-4e29-bffa-d6649cb506b9"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.037375 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-combined-ca-bundle\") pod \"24c45d25-9050-4e29-bffa-d6649cb506b9\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.037705 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-config-data\") pod \"24c45d25-9050-4e29-bffa-d6649cb506b9\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.037730 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cs85k\" (UniqueName: \"kubernetes.io/projected/24c45d25-9050-4e29-bffa-d6649cb506b9-kube-api-access-cs85k\") pod \"24c45d25-9050-4e29-bffa-d6649cb506b9\" (UID: \"24c45d25-9050-4e29-bffa-d6649cb506b9\") " Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.038216 5037 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/24c45d25-9050-4e29-bffa-d6649cb506b9-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.045500 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24c45d25-9050-4e29-bffa-d6649cb506b9-kube-api-access-cs85k" (OuterVolumeSpecName: "kube-api-access-cs85k") pod "24c45d25-9050-4e29-bffa-d6649cb506b9" (UID: "24c45d25-9050-4e29-bffa-d6649cb506b9"). InnerVolumeSpecName "kube-api-access-cs85k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.046946 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "24c45d25-9050-4e29-bffa-d6649cb506b9" (UID: "24c45d25-9050-4e29-bffa-d6649cb506b9"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.049647 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-scripts" (OuterVolumeSpecName: "scripts") pod "24c45d25-9050-4e29-bffa-d6649cb506b9" (UID: "24c45d25-9050-4e29-bffa-d6649cb506b9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.100342 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "24c45d25-9050-4e29-bffa-d6649cb506b9" (UID: "24c45d25-9050-4e29-bffa-d6649cb506b9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.140052 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.140100 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cs85k\" (UniqueName: \"kubernetes.io/projected/24c45d25-9050-4e29-bffa-d6649cb506b9-kube-api-access-cs85k\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.140115 5037 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.140128 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.147179 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-config-data" (OuterVolumeSpecName: "config-data") pod "24c45d25-9050-4e29-bffa-d6649cb506b9" (UID: "24c45d25-9050-4e29-bffa-d6649cb506b9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.241657 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24c45d25-9050-4e29-bffa-d6649cb506b9-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.763731 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"24c45d25-9050-4e29-bffa-d6649cb506b9","Type":"ContainerDied","Data":"05e09690be6f9ddd53946bab3ea2b361511b04a4eecc3d3b1c80250f8b18f65a"} Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.763780 5037 scope.go:117] "RemoveContainer" containerID="b5a76dab63bf1288a33999b8f62cf1b107ff7b2d0ed7cd81e704547774580946" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.763924 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.802643 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.811245 5037 scope.go:117] "RemoveContainer" containerID="a43008a09a3271c6cfb957852cebfe0149f1828f3d55dc6ae8ebbf1fb7af0f82" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.822396 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.839341 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 14:41:17 crc kubenswrapper[5037]: E1126 14:41:17.839778 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24c45d25-9050-4e29-bffa-d6649cb506b9" containerName="cinder-scheduler" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.839796 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="24c45d25-9050-4e29-bffa-d6649cb506b9" containerName="cinder-scheduler" Nov 26 14:41:17 crc kubenswrapper[5037]: E1126 14:41:17.839816 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24c45d25-9050-4e29-bffa-d6649cb506b9" containerName="probe" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.839821 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="24c45d25-9050-4e29-bffa-d6649cb506b9" containerName="probe" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.839981 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="24c45d25-9050-4e29-bffa-d6649cb506b9" containerName="cinder-scheduler" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.839995 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="24c45d25-9050-4e29-bffa-d6649cb506b9" containerName="probe" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.840958 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.845023 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.847410 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.926773 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24c45d25-9050-4e29-bffa-d6649cb506b9" path="/var/lib/kubelet/pods/24c45d25-9050-4e29-bffa-d6649cb506b9/volumes" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.954874 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.955230 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fskp\" (UniqueName: \"kubernetes.io/projected/fe17b260-d105-4274-88d1-d85fd9948f9f-kube-api-access-8fskp\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.955274 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.955379 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.955443 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.955469 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fe17b260-d105-4274-88d1-d85fd9948f9f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:17 crc kubenswrapper[5037]: I1126 14:41:17.976395 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.030247 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-64b48fff64-cppwc"] Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.034796 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-64b48fff64-cppwc" podUID="298b5c3b-8afb-4805-90cc-6e13fa47f559" containerName="neutron-api" 
containerID="cri-o://b4375d3a48b5220ef835c2698b095719bcb6787ea1adbe7ef2c0d8398408bf27" gracePeriod=30 Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.035189 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-64b48fff64-cppwc" podUID="298b5c3b-8afb-4805-90cc-6e13fa47f559" containerName="neutron-httpd" containerID="cri-o://438c6201d6b1523ba7fbc43efafce89a346d81d24d2c43b9d63c88e3e6ba3ae5" gracePeriod=30 Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.057314 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fskp\" (UniqueName: \"kubernetes.io/projected/fe17b260-d105-4274-88d1-d85fd9948f9f-kube-api-access-8fskp\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.057399 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.057434 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.057495 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.057515 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fe17b260-d105-4274-88d1-d85fd9948f9f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.057627 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.060566 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fe17b260-d105-4274-88d1-d85fd9948f9f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.065579 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.066560 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.067839 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.068304 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.077831 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fskp\" (UniqueName: \"kubernetes.io/projected/fe17b260-d105-4274-88d1-d85fd9948f9f-kube-api-access-8fskp\") pod \"cinder-scheduler-0\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " pod="openstack/cinder-scheduler-0" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.168771 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.667695 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 14:41:18 crc kubenswrapper[5037]: W1126 14:41:18.690536 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe17b260_d105_4274_88d1_d85fd9948f9f.slice/crio-cefb06f33be235b2147738c7a9f8b79ef5ea8381b1098388471d2071462e2d8a WatchSource:0}: Error finding container cefb06f33be235b2147738c7a9f8b79ef5ea8381b1098388471d2071462e2d8a: Status 404 returned error can't find the container with id cefb06f33be235b2147738c7a9f8b79ef5ea8381b1098388471d2071462e2d8a Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.813921 5037 generic.go:334] "Generic (PLEG): container finished" podID="298b5c3b-8afb-4805-90cc-6e13fa47f559" containerID="438c6201d6b1523ba7fbc43efafce89a346d81d24d2c43b9d63c88e3e6ba3ae5" exitCode=0 Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.814002 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64b48fff64-cppwc" event={"ID":"298b5c3b-8afb-4805-90cc-6e13fa47f559","Type":"ContainerDied","Data":"438c6201d6b1523ba7fbc43efafce89a346d81d24d2c43b9d63c88e3e6ba3ae5"} Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.816875 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fe17b260-d105-4274-88d1-d85fd9948f9f","Type":"ContainerStarted","Data":"cefb06f33be235b2147738c7a9f8b79ef5ea8381b1098388471d2071462e2d8a"} Nov 26 14:41:18 crc kubenswrapper[5037]: I1126 14:41:18.907884 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:41:18 crc kubenswrapper[5037]: E1126 14:41:18.908417 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:41:19 crc kubenswrapper[5037]: I1126 14:41:19.107576 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 26 14:41:19 crc kubenswrapper[5037]: I1126 14:41:19.904456 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fe17b260-d105-4274-88d1-d85fd9948f9f","Type":"ContainerStarted","Data":"a141207a0fff58064f3407d6c288ff7903f292bd3e192081eb2a010bd7fcf95d"} Nov 26 14:41:20 crc kubenswrapper[5037]: I1126 14:41:20.920797 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fe17b260-d105-4274-88d1-d85fd9948f9f","Type":"ContainerStarted","Data":"140e7be2182c285f86914d1d0349ab0f880704f06b09bd28f8522e6957b1e06c"} Nov 26 14:41:20 crc kubenswrapper[5037]: I1126 14:41:20.948001 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.947947559 podStartE2EDuration="3.947947559s" podCreationTimestamp="2025-11-26 14:41:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:41:20.947431177 +0000 UTC m=+1547.744201361" watchObservedRunningTime="2025-11-26 14:41:20.947947559 +0000 UTC m=+1547.744717743" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.141979 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-7f55999cfc-jx9r6"] Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.143486 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.150721 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.150948 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.151190 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.152024 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7f55999cfc-jx9r6"] Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.213064 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m28hp\" (UniqueName: \"kubernetes.io/projected/aed636f4-272c-4379-a6f3-8247ae0e46cc-kube-api-access-m28hp\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.213172 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/aed636f4-272c-4379-a6f3-8247ae0e46cc-etc-swift\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.213235 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-config-data\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.213331 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aed636f4-272c-4379-a6f3-8247ae0e46cc-log-httpd\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.213355 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aed636f4-272c-4379-a6f3-8247ae0e46cc-run-httpd\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.213379 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-combined-ca-bundle\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.213468 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-public-tls-certs\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " 
pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.213509 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-internal-tls-certs\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.318161 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-config-data\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.318332 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aed636f4-272c-4379-a6f3-8247ae0e46cc-log-httpd\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.318363 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aed636f4-272c-4379-a6f3-8247ae0e46cc-run-httpd\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.318413 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-combined-ca-bundle\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.318469 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-public-tls-certs\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.318497 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-internal-tls-certs\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.318571 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m28hp\" (UniqueName: \"kubernetes.io/projected/aed636f4-272c-4379-a6f3-8247ae0e46cc-kube-api-access-m28hp\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.318627 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/aed636f4-272c-4379-a6f3-8247ae0e46cc-etc-swift\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " 
pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.319060 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aed636f4-272c-4379-a6f3-8247ae0e46cc-log-httpd\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.319218 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aed636f4-272c-4379-a6f3-8247ae0e46cc-run-httpd\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.325061 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-public-tls-certs\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.326349 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/aed636f4-272c-4379-a6f3-8247ae0e46cc-etc-swift\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.326986 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-internal-tls-certs\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.339672 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-config-data\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.351069 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m28hp\" (UniqueName: \"kubernetes.io/projected/aed636f4-272c-4379-a6f3-8247ae0e46cc-kube-api-access-m28hp\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.357242 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-combined-ca-bundle\") pod \"swift-proxy-7f55999cfc-jx9r6\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") " pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.484687 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.948456 5037 generic.go:334] "Generic (PLEG): container finished" podID="298b5c3b-8afb-4805-90cc-6e13fa47f559" containerID="b4375d3a48b5220ef835c2698b095719bcb6787ea1adbe7ef2c0d8398408bf27" exitCode=0 Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.948509 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64b48fff64-cppwc" event={"ID":"298b5c3b-8afb-4805-90cc-6e13fa47f559","Type":"ContainerDied","Data":"b4375d3a48b5220ef835c2698b095719bcb6787ea1adbe7ef2c0d8398408bf27"} Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.948911 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64b48fff64-cppwc" event={"ID":"298b5c3b-8afb-4805-90cc-6e13fa47f559","Type":"ContainerDied","Data":"78c1ca84313ad54257e28c624bc8832f63af0838347c1bedb6b8ffcb27871418"} Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.948948 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="78c1ca84313ad54257e28c624bc8832f63af0838347c1bedb6b8ffcb27871418" Nov 26 14:41:21 crc kubenswrapper[5037]: I1126 14:41:21.960968 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.002248 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.002550 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="ceilometer-central-agent" containerID="cri-o://ecfe02c1dc20c77b83de3aad46edd5f2d8609268e30702f511a086bdc4e1820b" gracePeriod=30 Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.002946 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="ceilometer-notification-agent" containerID="cri-o://77e3b705d52ae0fd1eec5a8ec00e21c0da85ce8a8b08e853102e7ce45712d1e1" gracePeriod=30 Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.002996 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="proxy-httpd" containerID="cri-o://a636eb1c52ba5fb1a646ed001892d91916061ddd778605fd9b8ccd3a6c44f12c" gracePeriod=30 Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.003043 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="sg-core" containerID="cri-o://1cac0a928a182d949345b3bf28a7eb70e03e598482ad70f0156fa523bf850643" gracePeriod=30 Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.030574 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.042053 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-config\") pod \"298b5c3b-8afb-4805-90cc-6e13fa47f559\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " Nov 26 14:41:22 crc 
kubenswrapper[5037]: I1126 14:41:22.042144 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-combined-ca-bundle\") pod \"298b5c3b-8afb-4805-90cc-6e13fa47f559\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.042175 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-httpd-config\") pod \"298b5c3b-8afb-4805-90cc-6e13fa47f559\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.042230 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-ovndb-tls-certs\") pod \"298b5c3b-8afb-4805-90cc-6e13fa47f559\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.042319 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x85nl\" (UniqueName: \"kubernetes.io/projected/298b5c3b-8afb-4805-90cc-6e13fa47f559-kube-api-access-x85nl\") pod \"298b5c3b-8afb-4805-90cc-6e13fa47f559\" (UID: \"298b5c3b-8afb-4805-90cc-6e13fa47f559\") " Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.048421 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "298b5c3b-8afb-4805-90cc-6e13fa47f559" (UID: "298b5c3b-8afb-4805-90cc-6e13fa47f559"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.048627 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/298b5c3b-8afb-4805-90cc-6e13fa47f559-kube-api-access-x85nl" (OuterVolumeSpecName: "kube-api-access-x85nl") pod "298b5c3b-8afb-4805-90cc-6e13fa47f559" (UID: "298b5c3b-8afb-4805-90cc-6e13fa47f559"). InnerVolumeSpecName "kube-api-access-x85nl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.102392 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-config" (OuterVolumeSpecName: "config") pod "298b5c3b-8afb-4805-90cc-6e13fa47f559" (UID: "298b5c3b-8afb-4805-90cc-6e13fa47f559"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.124744 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "298b5c3b-8afb-4805-90cc-6e13fa47f559" (UID: "298b5c3b-8afb-4805-90cc-6e13fa47f559"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.134862 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "298b5c3b-8afb-4805-90cc-6e13fa47f559" (UID: "298b5c3b-8afb-4805-90cc-6e13fa47f559"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.144726 5037 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.144752 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x85nl\" (UniqueName: \"kubernetes.io/projected/298b5c3b-8afb-4805-90cc-6e13fa47f559-kube-api-access-x85nl\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.144763 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.144773 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.144787 5037 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/298b5c3b-8afb-4805-90cc-6e13fa47f559-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:22 crc kubenswrapper[5037]: W1126 14:41:22.178212 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaed636f4_272c_4379_a6f3_8247ae0e46cc.slice/crio-f1391f198bbb2adeeea1f5c8a5548c186c4cc57e19e6dd816b8890f1380cf087 WatchSource:0}: Error finding container f1391f198bbb2adeeea1f5c8a5548c186c4cc57e19e6dd816b8890f1380cf087: Status 404 returned error can't find the container with id f1391f198bbb2adeeea1f5c8a5548c186c4cc57e19e6dd816b8890f1380cf087 Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.184023 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7f55999cfc-jx9r6"] Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.968174 5037 generic.go:334] "Generic (PLEG): container finished" podID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerID="a636eb1c52ba5fb1a646ed001892d91916061ddd778605fd9b8ccd3a6c44f12c" exitCode=0 Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.968632 5037 generic.go:334] "Generic (PLEG): container finished" podID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerID="1cac0a928a182d949345b3bf28a7eb70e03e598482ad70f0156fa523bf850643" exitCode=2 Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.968254 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8ec55994-5e0e-4f1e-aece-501836b46c63","Type":"ContainerDied","Data":"a636eb1c52ba5fb1a646ed001892d91916061ddd778605fd9b8ccd3a6c44f12c"} Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.968719 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8ec55994-5e0e-4f1e-aece-501836b46c63","Type":"ContainerDied","Data":"1cac0a928a182d949345b3bf28a7eb70e03e598482ad70f0156fa523bf850643"} Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.968757 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8ec55994-5e0e-4f1e-aece-501836b46c63","Type":"ContainerDied","Data":"77e3b705d52ae0fd1eec5a8ec00e21c0da85ce8a8b08e853102e7ce45712d1e1"} Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.968647 
5037 generic.go:334] "Generic (PLEG): container finished" podID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerID="77e3b705d52ae0fd1eec5a8ec00e21c0da85ce8a8b08e853102e7ce45712d1e1" exitCode=0 Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.968798 5037 generic.go:334] "Generic (PLEG): container finished" podID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerID="ecfe02c1dc20c77b83de3aad46edd5f2d8609268e30702f511a086bdc4e1820b" exitCode=0 Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.968953 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8ec55994-5e0e-4f1e-aece-501836b46c63","Type":"ContainerDied","Data":"ecfe02c1dc20c77b83de3aad46edd5f2d8609268e30702f511a086bdc4e1820b"} Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.972434 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f55999cfc-jx9r6" event={"ID":"aed636f4-272c-4379-a6f3-8247ae0e46cc","Type":"ContainerStarted","Data":"2019faaa0d00a4dce9f4ce3484825a1f6132bcb7d194dfe843a8fd57678e6f7d"} Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.972503 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f55999cfc-jx9r6" event={"ID":"aed636f4-272c-4379-a6f3-8247ae0e46cc","Type":"ContainerStarted","Data":"ac124cedaed73284f9bbf36277718f97b7c752f739fd563062cca9f2857a6274"} Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.972533 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f55999cfc-jx9r6" event={"ID":"aed636f4-272c-4379-a6f3-8247ae0e46cc","Type":"ContainerStarted","Data":"f1391f198bbb2adeeea1f5c8a5548c186c4cc57e19e6dd816b8890f1380cf087"} Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.972463 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-64b48fff64-cppwc" Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.972794 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:22 crc kubenswrapper[5037]: I1126 14:41:22.995771 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-7f55999cfc-jx9r6" podStartSLOduration=1.995751373 podStartE2EDuration="1.995751373s" podCreationTimestamp="2025-11-26 14:41:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:41:22.994629736 +0000 UTC m=+1549.791399930" watchObservedRunningTime="2025-11-26 14:41:22.995751373 +0000 UTC m=+1549.792521557" Nov 26 14:41:23 crc kubenswrapper[5037]: I1126 14:41:23.022707 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-64b48fff64-cppwc"] Nov 26 14:41:23 crc kubenswrapper[5037]: I1126 14:41:23.030324 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-64b48fff64-cppwc"] Nov 26 14:41:23 crc kubenswrapper[5037]: I1126 14:41:23.169014 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 14:41:23 crc kubenswrapper[5037]: I1126 14:41:23.922441 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="298b5c3b-8afb-4805-90cc-6e13fa47f559" path="/var/lib/kubelet/pods/298b5c3b-8afb-4805-90cc-6e13fa47f559/volumes" Nov 26 14:41:23 crc kubenswrapper[5037]: I1126 14:41:23.983147 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.085574 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-5zbnt"] Nov 26 14:41:26 crc kubenswrapper[5037]: E1126 14:41:26.086790 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="298b5c3b-8afb-4805-90cc-6e13fa47f559" containerName="neutron-httpd" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.086811 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="298b5c3b-8afb-4805-90cc-6e13fa47f559" containerName="neutron-httpd" Nov 26 14:41:26 crc kubenswrapper[5037]: E1126 14:41:26.086843 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="298b5c3b-8afb-4805-90cc-6e13fa47f559" containerName="neutron-api" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.086853 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="298b5c3b-8afb-4805-90cc-6e13fa47f559" containerName="neutron-api" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.087225 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="298b5c3b-8afb-4805-90cc-6e13fa47f559" containerName="neutron-api" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.087252 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="298b5c3b-8afb-4805-90cc-6e13fa47f559" containerName="neutron-httpd" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.088676 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-5zbnt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.102697 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-5zbnt"] Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.174583 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-eb2b-account-create-update-92xtm"] Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.178137 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-eb2b-account-create-update-92xtm" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.180671 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.198815 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-eb2b-account-create-update-92xtm"] Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.230217 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc458\" (UniqueName: \"kubernetes.io/projected/86867a49-37d5-4289-a31e-8eed1257c87a-kube-api-access-hc458\") pod \"nova-api-db-create-5zbnt\" (UID: \"86867a49-37d5-4289-a31e-8eed1257c87a\") " pod="openstack/nova-api-db-create-5zbnt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.230577 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab0d1aa1-4e89-4fa1-a06d-a199ed98670f-operator-scripts\") pod \"nova-api-eb2b-account-create-update-92xtm\" (UID: \"ab0d1aa1-4e89-4fa1-a06d-a199ed98670f\") " pod="openstack/nova-api-eb2b-account-create-update-92xtm" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.230787 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86867a49-37d5-4289-a31e-8eed1257c87a-operator-scripts\") pod \"nova-api-db-create-5zbnt\" (UID: \"86867a49-37d5-4289-a31e-8eed1257c87a\") " pod="openstack/nova-api-db-create-5zbnt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.230829 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpzj7\" (UniqueName: \"kubernetes.io/projected/ab0d1aa1-4e89-4fa1-a06d-a199ed98670f-kube-api-access-kpzj7\") pod \"nova-api-eb2b-account-create-update-92xtm\" (UID: \"ab0d1aa1-4e89-4fa1-a06d-a199ed98670f\") " pod="openstack/nova-api-eb2b-account-create-update-92xtm" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.280830 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-m4hls"] Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.281936 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-m4hls" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.306914 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-m4hls"] Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.332550 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9a76d5f-5242-4fbf-a824-970d9ffcc3ad-operator-scripts\") pod \"nova-cell0-db-create-m4hls\" (UID: \"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad\") " pod="openstack/nova-cell0-db-create-m4hls" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.332603 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ft6mc\" (UniqueName: \"kubernetes.io/projected/b9a76d5f-5242-4fbf-a824-970d9ffcc3ad-kube-api-access-ft6mc\") pod \"nova-cell0-db-create-m4hls\" (UID: \"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad\") " pod="openstack/nova-cell0-db-create-m4hls" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.332713 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86867a49-37d5-4289-a31e-8eed1257c87a-operator-scripts\") pod \"nova-api-db-create-5zbnt\" (UID: \"86867a49-37d5-4289-a31e-8eed1257c87a\") " pod="openstack/nova-api-db-create-5zbnt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.332757 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpzj7\" (UniqueName: \"kubernetes.io/projected/ab0d1aa1-4e89-4fa1-a06d-a199ed98670f-kube-api-access-kpzj7\") pod \"nova-api-eb2b-account-create-update-92xtm\" (UID: \"ab0d1aa1-4e89-4fa1-a06d-a199ed98670f\") " pod="openstack/nova-api-eb2b-account-create-update-92xtm" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.332812 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc458\" (UniqueName: \"kubernetes.io/projected/86867a49-37d5-4289-a31e-8eed1257c87a-kube-api-access-hc458\") pod \"nova-api-db-create-5zbnt\" (UID: \"86867a49-37d5-4289-a31e-8eed1257c87a\") " pod="openstack/nova-api-db-create-5zbnt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.332857 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab0d1aa1-4e89-4fa1-a06d-a199ed98670f-operator-scripts\") pod \"nova-api-eb2b-account-create-update-92xtm\" (UID: \"ab0d1aa1-4e89-4fa1-a06d-a199ed98670f\") " pod="openstack/nova-api-eb2b-account-create-update-92xtm" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.333625 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86867a49-37d5-4289-a31e-8eed1257c87a-operator-scripts\") pod \"nova-api-db-create-5zbnt\" (UID: \"86867a49-37d5-4289-a31e-8eed1257c87a\") " pod="openstack/nova-api-db-create-5zbnt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.334388 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab0d1aa1-4e89-4fa1-a06d-a199ed98670f-operator-scripts\") pod \"nova-api-eb2b-account-create-update-92xtm\" (UID: \"ab0d1aa1-4e89-4fa1-a06d-a199ed98670f\") " pod="openstack/nova-api-eb2b-account-create-update-92xtm" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.360984 5037 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpzj7\" (UniqueName: \"kubernetes.io/projected/ab0d1aa1-4e89-4fa1-a06d-a199ed98670f-kube-api-access-kpzj7\") pod \"nova-api-eb2b-account-create-update-92xtm\" (UID: \"ab0d1aa1-4e89-4fa1-a06d-a199ed98670f\") " pod="openstack/nova-api-eb2b-account-create-update-92xtm" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.371080 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hc458\" (UniqueName: \"kubernetes.io/projected/86867a49-37d5-4289-a31e-8eed1257c87a-kube-api-access-hc458\") pod \"nova-api-db-create-5zbnt\" (UID: \"86867a49-37d5-4289-a31e-8eed1257c87a\") " pod="openstack/nova-api-db-create-5zbnt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.385895 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-j65gt"] Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.387430 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-j65gt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.397511 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-e6d6-account-create-update-jg9lh"] Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.398879 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e6d6-account-create-update-jg9lh" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.406496 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-j65gt"] Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.406817 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.417921 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-e6d6-account-create-update-jg9lh"] Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.429890 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-5zbnt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.435199 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdg42\" (UniqueName: \"kubernetes.io/projected/b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79-kube-api-access-vdg42\") pod \"nova-cell1-db-create-j65gt\" (UID: \"b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79\") " pod="openstack/nova-cell1-db-create-j65gt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.435390 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79-operator-scripts\") pod \"nova-cell1-db-create-j65gt\" (UID: \"b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79\") " pod="openstack/nova-cell1-db-create-j65gt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.435435 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9a76d5f-5242-4fbf-a824-970d9ffcc3ad-operator-scripts\") pod \"nova-cell0-db-create-m4hls\" (UID: \"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad\") " pod="openstack/nova-cell0-db-create-m4hls" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.435466 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ft6mc\" (UniqueName: \"kubernetes.io/projected/b9a76d5f-5242-4fbf-a824-970d9ffcc3ad-kube-api-access-ft6mc\") pod \"nova-cell0-db-create-m4hls\" (UID: \"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad\") " pod="openstack/nova-cell0-db-create-m4hls" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.440007 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9a76d5f-5242-4fbf-a824-970d9ffcc3ad-operator-scripts\") pod \"nova-cell0-db-create-m4hls\" (UID: \"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad\") " pod="openstack/nova-cell0-db-create-m4hls" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.465278 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ft6mc\" (UniqueName: \"kubernetes.io/projected/b9a76d5f-5242-4fbf-a824-970d9ffcc3ad-kube-api-access-ft6mc\") pod \"nova-cell0-db-create-m4hls\" (UID: \"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad\") " pod="openstack/nova-cell0-db-create-m4hls" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.497019 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-eb2b-account-create-update-92xtm" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.537157 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79-operator-scripts\") pod \"nova-cell1-db-create-j65gt\" (UID: \"b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79\") " pod="openstack/nova-cell1-db-create-j65gt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.537220 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgmrq\" (UniqueName: \"kubernetes.io/projected/ece5ba73-c8c1-4d22-ac0c-411c8c59e969-kube-api-access-mgmrq\") pod \"nova-cell0-e6d6-account-create-update-jg9lh\" (UID: \"ece5ba73-c8c1-4d22-ac0c-411c8c59e969\") " pod="openstack/nova-cell0-e6d6-account-create-update-jg9lh" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.537302 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ece5ba73-c8c1-4d22-ac0c-411c8c59e969-operator-scripts\") pod \"nova-cell0-e6d6-account-create-update-jg9lh\" (UID: \"ece5ba73-c8c1-4d22-ac0c-411c8c59e969\") " pod="openstack/nova-cell0-e6d6-account-create-update-jg9lh" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.537355 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdg42\" (UniqueName: \"kubernetes.io/projected/b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79-kube-api-access-vdg42\") pod \"nova-cell1-db-create-j65gt\" (UID: \"b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79\") " pod="openstack/nova-cell1-db-create-j65gt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.538262 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79-operator-scripts\") pod \"nova-cell1-db-create-j65gt\" (UID: \"b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79\") " pod="openstack/nova-cell1-db-create-j65gt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.558548 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdg42\" (UniqueName: \"kubernetes.io/projected/b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79-kube-api-access-vdg42\") pod \"nova-cell1-db-create-j65gt\" (UID: \"b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79\") " pod="openstack/nova-cell1-db-create-j65gt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.584424 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-2e29-account-create-update-lrvct"] Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.586075 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-2e29-account-create-update-lrvct" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.589004 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.600432 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-2e29-account-create-update-lrvct"] Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.609803 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-m4hls" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.639730 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2j6zx\" (UniqueName: \"kubernetes.io/projected/56e600a6-1625-4844-9a51-da79b454cd34-kube-api-access-2j6zx\") pod \"nova-cell1-2e29-account-create-update-lrvct\" (UID: \"56e600a6-1625-4844-9a51-da79b454cd34\") " pod="openstack/nova-cell1-2e29-account-create-update-lrvct" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.639814 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56e600a6-1625-4844-9a51-da79b454cd34-operator-scripts\") pod \"nova-cell1-2e29-account-create-update-lrvct\" (UID: \"56e600a6-1625-4844-9a51-da79b454cd34\") " pod="openstack/nova-cell1-2e29-account-create-update-lrvct" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.639896 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgmrq\" (UniqueName: \"kubernetes.io/projected/ece5ba73-c8c1-4d22-ac0c-411c8c59e969-kube-api-access-mgmrq\") pod \"nova-cell0-e6d6-account-create-update-jg9lh\" (UID: \"ece5ba73-c8c1-4d22-ac0c-411c8c59e969\") " pod="openstack/nova-cell0-e6d6-account-create-update-jg9lh" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.639957 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ece5ba73-c8c1-4d22-ac0c-411c8c59e969-operator-scripts\") pod \"nova-cell0-e6d6-account-create-update-jg9lh\" (UID: \"ece5ba73-c8c1-4d22-ac0c-411c8c59e969\") " pod="openstack/nova-cell0-e6d6-account-create-update-jg9lh" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.640763 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ece5ba73-c8c1-4d22-ac0c-411c8c59e969-operator-scripts\") pod \"nova-cell0-e6d6-account-create-update-jg9lh\" (UID: \"ece5ba73-c8c1-4d22-ac0c-411c8c59e969\") " pod="openstack/nova-cell0-e6d6-account-create-update-jg9lh" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.656813 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgmrq\" (UniqueName: \"kubernetes.io/projected/ece5ba73-c8c1-4d22-ac0c-411c8c59e969-kube-api-access-mgmrq\") pod \"nova-cell0-e6d6-account-create-update-jg9lh\" (UID: \"ece5ba73-c8c1-4d22-ac0c-411c8c59e969\") " pod="openstack/nova-cell0-e6d6-account-create-update-jg9lh" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.741884 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56e600a6-1625-4844-9a51-da79b454cd34-operator-scripts\") pod \"nova-cell1-2e29-account-create-update-lrvct\" (UID: \"56e600a6-1625-4844-9a51-da79b454cd34\") " pod="openstack/nova-cell1-2e29-account-create-update-lrvct" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.742043 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2j6zx\" (UniqueName: \"kubernetes.io/projected/56e600a6-1625-4844-9a51-da79b454cd34-kube-api-access-2j6zx\") pod \"nova-cell1-2e29-account-create-update-lrvct\" (UID: \"56e600a6-1625-4844-9a51-da79b454cd34\") " pod="openstack/nova-cell1-2e29-account-create-update-lrvct" Nov 26 14:41:26 crc kubenswrapper[5037]: 
I1126 14:41:26.743187 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56e600a6-1625-4844-9a51-da79b454cd34-operator-scripts\") pod \"nova-cell1-2e29-account-create-update-lrvct\" (UID: \"56e600a6-1625-4844-9a51-da79b454cd34\") " pod="openstack/nova-cell1-2e29-account-create-update-lrvct" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.759774 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2j6zx\" (UniqueName: \"kubernetes.io/projected/56e600a6-1625-4844-9a51-da79b454cd34-kube-api-access-2j6zx\") pod \"nova-cell1-2e29-account-create-update-lrvct\" (UID: \"56e600a6-1625-4844-9a51-da79b454cd34\") " pod="openstack/nova-cell1-2e29-account-create-update-lrvct" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.815274 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-j65gt" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.822994 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e6d6-account-create-update-jg9lh" Nov 26 14:41:26 crc kubenswrapper[5037]: I1126 14:41:26.914800 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-2e29-account-create-update-lrvct" Nov 26 14:41:27 crc kubenswrapper[5037]: I1126 14:41:27.773731 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.158:3000/\": dial tcp 10.217.0.158:3000: connect: connection refused" Nov 26 14:41:28 crc kubenswrapper[5037]: I1126 14:41:28.407048 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 14:41:28 crc kubenswrapper[5037]: I1126 14:41:28.899889 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:41:28 crc kubenswrapper[5037]: I1126 14:41:28.900500 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="51577a49-9ff5-419f-ade6-6b5a9abbf7c0" containerName="glance-log" containerID="cri-o://5f860a50e8a312cb271e8663accd0b603f1a9197ed83358443cce54a067f67bc" gracePeriod=30 Nov 26 14:41:28 crc kubenswrapper[5037]: I1126 14:41:28.900942 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="51577a49-9ff5-419f-ade6-6b5a9abbf7c0" containerName="glance-httpd" containerID="cri-o://979e78ffbe0bc4990e5111e8c7519d935e4c9ab922a1af6d9c17cf6aff56f82d" gracePeriod=30 Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.101554 5037 generic.go:334] "Generic (PLEG): container finished" podID="51577a49-9ff5-419f-ade6-6b5a9abbf7c0" containerID="5f860a50e8a312cb271e8663accd0b603f1a9197ed83358443cce54a067f67bc" exitCode=143 Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.101895 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"51577a49-9ff5-419f-ade6-6b5a9abbf7c0","Type":"ContainerDied","Data":"5f860a50e8a312cb271e8663accd0b603f1a9197ed83358443cce54a067f67bc"} Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.377077 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.492212 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8ec55994-5e0e-4f1e-aece-501836b46c63-run-httpd\") pod \"8ec55994-5e0e-4f1e-aece-501836b46c63\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.492251 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wdh4\" (UniqueName: \"kubernetes.io/projected/8ec55994-5e0e-4f1e-aece-501836b46c63-kube-api-access-9wdh4\") pod \"8ec55994-5e0e-4f1e-aece-501836b46c63\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.492341 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8ec55994-5e0e-4f1e-aece-501836b46c63-log-httpd\") pod \"8ec55994-5e0e-4f1e-aece-501836b46c63\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.492406 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-sg-core-conf-yaml\") pod \"8ec55994-5e0e-4f1e-aece-501836b46c63\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.492465 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-combined-ca-bundle\") pod \"8ec55994-5e0e-4f1e-aece-501836b46c63\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.492492 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-config-data\") pod \"8ec55994-5e0e-4f1e-aece-501836b46c63\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.492530 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-scripts\") pod \"8ec55994-5e0e-4f1e-aece-501836b46c63\" (UID: \"8ec55994-5e0e-4f1e-aece-501836b46c63\") " Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.493271 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ec55994-5e0e-4f1e-aece-501836b46c63-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8ec55994-5e0e-4f1e-aece-501836b46c63" (UID: "8ec55994-5e0e-4f1e-aece-501836b46c63"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.493412 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ec55994-5e0e-4f1e-aece-501836b46c63-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8ec55994-5e0e-4f1e-aece-501836b46c63" (UID: "8ec55994-5e0e-4f1e-aece-501836b46c63"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.497676 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-scripts" (OuterVolumeSpecName: "scripts") pod "8ec55994-5e0e-4f1e-aece-501836b46c63" (UID: "8ec55994-5e0e-4f1e-aece-501836b46c63"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.501027 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ec55994-5e0e-4f1e-aece-501836b46c63-kube-api-access-9wdh4" (OuterVolumeSpecName: "kube-api-access-9wdh4") pod "8ec55994-5e0e-4f1e-aece-501836b46c63" (UID: "8ec55994-5e0e-4f1e-aece-501836b46c63"). InnerVolumeSpecName "kube-api-access-9wdh4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.520160 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8ec55994-5e0e-4f1e-aece-501836b46c63" (UID: "8ec55994-5e0e-4f1e-aece-501836b46c63"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.579937 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-e6d6-account-create-update-jg9lh"] Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.594630 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.594662 5037 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8ec55994-5e0e-4f1e-aece-501836b46c63-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.594672 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wdh4\" (UniqueName: \"kubernetes.io/projected/8ec55994-5e0e-4f1e-aece-501836b46c63-kube-api-access-9wdh4\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.594682 5037 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8ec55994-5e0e-4f1e-aece-501836b46c63-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.594691 5037 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.595972 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8ec55994-5e0e-4f1e-aece-501836b46c63" (UID: "8ec55994-5e0e-4f1e-aece-501836b46c63"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.598203 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-config-data" (OuterVolumeSpecName: "config-data") pod "8ec55994-5e0e-4f1e-aece-501836b46c63" (UID: "8ec55994-5e0e-4f1e-aece-501836b46c63"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.696492 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.696831 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ec55994-5e0e-4f1e-aece-501836b46c63-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.723255 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-2e29-account-create-update-lrvct"] Nov 26 14:41:29 crc kubenswrapper[5037]: W1126 14:41:29.732449 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod56e600a6_1625_4844_9a51_da79b454cd34.slice/crio-b46f5e97ea8d35075c9f2573888c41b3505f1d9189a8d72bbaecfccf51d8e784 WatchSource:0}: Error finding container b46f5e97ea8d35075c9f2573888c41b3505f1d9189a8d72bbaecfccf51d8e784: Status 404 returned error can't find the container with id b46f5e97ea8d35075c9f2573888c41b3505f1d9189a8d72bbaecfccf51d8e784 Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.734691 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-m4hls"] Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.757024 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-eb2b-account-create-update-92xtm"] Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.769114 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-j65gt"] Nov 26 14:41:29 crc kubenswrapper[5037]: I1126 14:41:29.932057 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-5zbnt"] Nov 26 14:41:29 crc kubenswrapper[5037]: W1126 14:41:29.937538 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod86867a49_37d5_4289_a31e_8eed1257c87a.slice/crio-0de3f3e15fa11c2aaa7a60fc97b70425ee848f36ac666681f5fd19972ceb4284 WatchSource:0}: Error finding container 0de3f3e15fa11c2aaa7a60fc97b70425ee848f36ac666681f5fd19972ceb4284: Status 404 returned error can't find the container with id 0de3f3e15fa11c2aaa7a60fc97b70425ee848f36ac666681f5fd19972ceb4284 Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.116329 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-j65gt" event={"ID":"b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79","Type":"ContainerStarted","Data":"f01a478a7f8e4f6c00e9322b70050ee6b5c52b4e04e666254402374cc97cb01d"} Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.119605 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8ec55994-5e0e-4f1e-aece-501836b46c63","Type":"ContainerDied","Data":"ac0bb50783c69d6f012d53c2003400e9b811eb2df950f24d895fc55e4d2f4efa"} Nov 26 
14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.119652 5037 scope.go:117] "RemoveContainer" containerID="a636eb1c52ba5fb1a646ed001892d91916061ddd778605fd9b8ccd3a6c44f12c" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.119772 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.122215 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7","Type":"ContainerStarted","Data":"963cb889a355aaf5eaa5d102c5937c4f8735b969d035ba5db0079e3607909577"} Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.125734 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2e29-account-create-update-lrvct" event={"ID":"56e600a6-1625-4844-9a51-da79b454cd34","Type":"ContainerStarted","Data":"1a37e041cfbebe66b77bc0f18efdcf4b5deb2e9cf0cd0c8a577ccb16cb667c59"} Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.125766 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2e29-account-create-update-lrvct" event={"ID":"56e600a6-1625-4844-9a51-da79b454cd34","Type":"ContainerStarted","Data":"b46f5e97ea8d35075c9f2573888c41b3505f1d9189a8d72bbaecfccf51d8e784"} Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.135810 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5zbnt" event={"ID":"86867a49-37d5-4289-a31e-8eed1257c87a","Type":"ContainerStarted","Data":"0de3f3e15fa11c2aaa7a60fc97b70425ee848f36ac666681f5fd19972ceb4284"} Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.138857 5037 generic.go:334] "Generic (PLEG): container finished" podID="ece5ba73-c8c1-4d22-ac0c-411c8c59e969" containerID="6161ba904919c36fc996416a539e7b9532a492480ed49709a6366bb545af200e" exitCode=0 Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.138974 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e6d6-account-create-update-jg9lh" event={"ID":"ece5ba73-c8c1-4d22-ac0c-411c8c59e969","Type":"ContainerDied","Data":"6161ba904919c36fc996416a539e7b9532a492480ed49709a6366bb545af200e"} Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.139007 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e6d6-account-create-update-jg9lh" event={"ID":"ece5ba73-c8c1-4d22-ac0c-411c8c59e969","Type":"ContainerStarted","Data":"7fc0e12b4ef9fc9ff988f5f825f879bed93abc06b85b92a6371d028dc443e43a"} Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.140547 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-2e29-account-create-update-lrvct" podStartSLOduration=4.140533224 podStartE2EDuration="4.140533224s" podCreationTimestamp="2025-11-26 14:41:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:41:30.138418122 +0000 UTC m=+1556.935188316" watchObservedRunningTime="2025-11-26 14:41:30.140533224 +0000 UTC m=+1556.937303428" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.143905 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-eb2b-account-create-update-92xtm" event={"ID":"ab0d1aa1-4e89-4fa1-a06d-a199ed98670f","Type":"ContainerStarted","Data":"7f990c78a7c584a8186f85ac8606b9254f680b0f3bb403c3bcef3257ddcc3c04"} Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.145233 5037 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/nova-cell0-db-create-m4hls" event={"ID":"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad","Type":"ContainerStarted","Data":"6c5a997620846ace07292574a12b8476eab5ce27fac1929eff03c5cf4273334d"} Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.145256 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-m4hls" event={"ID":"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad","Type":"ContainerStarted","Data":"381b099e5d7bcaa2c2e0ade4b111b3c5f117147876f64712660d93511962014a"} Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.161144 5037 scope.go:117] "RemoveContainer" containerID="1cac0a928a182d949345b3bf28a7eb70e03e598482ad70f0156fa523bf850643" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.162794 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.037185898 podStartE2EDuration="15.162772897s" podCreationTimestamp="2025-11-26 14:41:15 +0000 UTC" firstStartedPulling="2025-11-26 14:41:15.923227238 +0000 UTC m=+1542.719997422" lastFinishedPulling="2025-11-26 14:41:29.048814237 +0000 UTC m=+1555.845584421" observedRunningTime="2025-11-26 14:41:30.153448869 +0000 UTC m=+1556.950219073" watchObservedRunningTime="2025-11-26 14:41:30.162772897 +0000 UTC m=+1556.959543081" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.185164 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-m4hls" podStartSLOduration=4.185140723 podStartE2EDuration="4.185140723s" podCreationTimestamp="2025-11-26 14:41:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:41:30.165279707 +0000 UTC m=+1556.962049891" watchObservedRunningTime="2025-11-26 14:41:30.185140723 +0000 UTC m=+1556.981910907" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.191117 5037 scope.go:117] "RemoveContainer" containerID="77e3b705d52ae0fd1eec5a8ec00e21c0da85ce8a8b08e853102e7ce45712d1e1" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.198829 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.219815 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.255880 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:30 crc kubenswrapper[5037]: E1126 14:41:30.256342 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="sg-core" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.256353 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="sg-core" Nov 26 14:41:30 crc kubenswrapper[5037]: E1126 14:41:30.256369 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="proxy-httpd" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.256375 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="proxy-httpd" Nov 26 14:41:30 crc kubenswrapper[5037]: E1126 14:41:30.256397 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="ceilometer-central-agent" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.256403 5037 
state_mem.go:107] "Deleted CPUSet assignment" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="ceilometer-central-agent" Nov 26 14:41:30 crc kubenswrapper[5037]: E1126 14:41:30.256419 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="ceilometer-notification-agent" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.256424 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="ceilometer-notification-agent" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.256605 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="ceilometer-central-agent" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.256615 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="ceilometer-notification-agent" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.256628 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="sg-core" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.256638 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" containerName="proxy-httpd" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.258223 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.260345 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.260539 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.264747 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.308310 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-config-data\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.308368 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-scripts\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.308412 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.308491 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-log-httpd\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 
14:41:30.308557 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-run-httpd\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.308617 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92jpf\" (UniqueName: \"kubernetes.io/projected/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-kube-api-access-92jpf\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.308687 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.311085 5037 scope.go:117] "RemoveContainer" containerID="ecfe02c1dc20c77b83de3aad46edd5f2d8609268e30702f511a086bdc4e1820b" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.410365 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92jpf\" (UniqueName: \"kubernetes.io/projected/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-kube-api-access-92jpf\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.410439 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.410572 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-config-data\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.410596 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-scripts\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.410628 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.410649 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-log-httpd\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.410679 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-run-httpd\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.411647 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-log-httpd\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.411710 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-run-httpd\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.433681 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.437918 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.439182 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-config-data\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.463243 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-scripts\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.469988 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92jpf\" (UniqueName: \"kubernetes.io/projected/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-kube-api-access-92jpf\") pod \"ceilometer-0\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.604514 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.829505 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.830205 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="217b743f-dd2d-4fac-b61e-4ecd43e540d4" containerName="glance-log" containerID="cri-o://e45352dde5f5b9a92cfe20673f8c8ed00298f5bee8f04018fad41a90935adb7e" gracePeriod=30 Nov 26 14:41:30 crc kubenswrapper[5037]: I1126 14:41:30.831230 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="217b743f-dd2d-4fac-b61e-4ecd43e540d4" containerName="glance-httpd" containerID="cri-o://2ade519bc8f930f63a7e98b174c9ebeb57533c95227f89b8a7c1a16fc7313c88" gracePeriod=30 Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.087736 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.155054 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fcecbf16-4ee2-4c4e-93d3-9013208af0c3","Type":"ContainerStarted","Data":"1bd028e209776c80516c3bab85d08d487d8052e74973ef25a86ab924ac7f95c9"} Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.157736 5037 generic.go:334] "Generic (PLEG): container finished" podID="b9a76d5f-5242-4fbf-a824-970d9ffcc3ad" containerID="6c5a997620846ace07292574a12b8476eab5ce27fac1929eff03c5cf4273334d" exitCode=0 Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.157873 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-m4hls" event={"ID":"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad","Type":"ContainerDied","Data":"6c5a997620846ace07292574a12b8476eab5ce27fac1929eff03c5cf4273334d"} Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.161653 5037 generic.go:334] "Generic (PLEG): container finished" podID="b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79" containerID="94d7a45db71326af06b4b972ad67c72425bf8cc0bf7bea66342eaa2f1e0e7d7b" exitCode=0 Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.161785 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-j65gt" event={"ID":"b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79","Type":"ContainerDied","Data":"94d7a45db71326af06b4b972ad67c72425bf8cc0bf7bea66342eaa2f1e0e7d7b"} Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.166352 5037 generic.go:334] "Generic (PLEG): container finished" podID="217b743f-dd2d-4fac-b61e-4ecd43e540d4" containerID="e45352dde5f5b9a92cfe20673f8c8ed00298f5bee8f04018fad41a90935adb7e" exitCode=143 Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.166470 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"217b743f-dd2d-4fac-b61e-4ecd43e540d4","Type":"ContainerDied","Data":"e45352dde5f5b9a92cfe20673f8c8ed00298f5bee8f04018fad41a90935adb7e"} Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.168148 5037 generic.go:334] "Generic (PLEG): container finished" podID="56e600a6-1625-4844-9a51-da79b454cd34" containerID="1a37e041cfbebe66b77bc0f18efdcf4b5deb2e9cf0cd0c8a577ccb16cb667c59" exitCode=0 Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.168204 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2e29-account-create-update-lrvct" 
event={"ID":"56e600a6-1625-4844-9a51-da79b454cd34","Type":"ContainerDied","Data":"1a37e041cfbebe66b77bc0f18efdcf4b5deb2e9cf0cd0c8a577ccb16cb667c59"} Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.170450 5037 generic.go:334] "Generic (PLEG): container finished" podID="86867a49-37d5-4289-a31e-8eed1257c87a" containerID="ce804c960eacc58c7f42b83f833dff6342613db249b38fc647fe7cedf8112368" exitCode=0 Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.170544 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5zbnt" event={"ID":"86867a49-37d5-4289-a31e-8eed1257c87a","Type":"ContainerDied","Data":"ce804c960eacc58c7f42b83f833dff6342613db249b38fc647fe7cedf8112368"} Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.172160 5037 generic.go:334] "Generic (PLEG): container finished" podID="ab0d1aa1-4e89-4fa1-a06d-a199ed98670f" containerID="cf8bcb3dd095d034ddca525521c17df5621fc3fb75c9a732ffd5638bbb28d0ed" exitCode=0 Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.172269 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-eb2b-account-create-update-92xtm" event={"ID":"ab0d1aa1-4e89-4fa1-a06d-a199ed98670f","Type":"ContainerDied","Data":"cf8bcb3dd095d034ddca525521c17df5621fc3fb75c9a732ffd5638bbb28d0ed"} Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.490492 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.496966 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7f55999cfc-jx9r6" Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.549296 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e6d6-account-create-update-jg9lh" Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.651930 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ece5ba73-c8c1-4d22-ac0c-411c8c59e969-operator-scripts\") pod \"ece5ba73-c8c1-4d22-ac0c-411c8c59e969\" (UID: \"ece5ba73-c8c1-4d22-ac0c-411c8c59e969\") " Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.652382 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgmrq\" (UniqueName: \"kubernetes.io/projected/ece5ba73-c8c1-4d22-ac0c-411c8c59e969-kube-api-access-mgmrq\") pod \"ece5ba73-c8c1-4d22-ac0c-411c8c59e969\" (UID: \"ece5ba73-c8c1-4d22-ac0c-411c8c59e969\") " Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.652604 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ece5ba73-c8c1-4d22-ac0c-411c8c59e969-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ece5ba73-c8c1-4d22-ac0c-411c8c59e969" (UID: "ece5ba73-c8c1-4d22-ac0c-411c8c59e969"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.652888 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ece5ba73-c8c1-4d22-ac0c-411c8c59e969-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.655849 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ece5ba73-c8c1-4d22-ac0c-411c8c59e969-kube-api-access-mgmrq" (OuterVolumeSpecName: "kube-api-access-mgmrq") pod "ece5ba73-c8c1-4d22-ac0c-411c8c59e969" (UID: "ece5ba73-c8c1-4d22-ac0c-411c8c59e969"). InnerVolumeSpecName "kube-api-access-mgmrq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.754981 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgmrq\" (UniqueName: \"kubernetes.io/projected/ece5ba73-c8c1-4d22-ac0c-411c8c59e969-kube-api-access-mgmrq\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:31 crc kubenswrapper[5037]: I1126 14:41:31.921038 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ec55994-5e0e-4f1e-aece-501836b46c63" path="/var/lib/kubelet/pods/8ec55994-5e0e-4f1e-aece-501836b46c63/volumes" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.062800 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="51577a49-9ff5-419f-ade6-6b5a9abbf7c0" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.146:9292/healthcheck\": read tcp 10.217.0.2:55556->10.217.0.146:9292: read: connection reset by peer" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.062800 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="51577a49-9ff5-419f-ade6-6b5a9abbf7c0" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.146:9292/healthcheck\": read tcp 10.217.0.2:55572->10.217.0.146:9292: read: connection reset by peer" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.188479 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e6d6-account-create-update-jg9lh" event={"ID":"ece5ba73-c8c1-4d22-ac0c-411c8c59e969","Type":"ContainerDied","Data":"7fc0e12b4ef9fc9ff988f5f825f879bed93abc06b85b92a6371d028dc443e43a"} Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.188712 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7fc0e12b4ef9fc9ff988f5f825f879bed93abc06b85b92a6371d028dc443e43a" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.188764 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-e6d6-account-create-update-jg9lh" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.190741 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fcecbf16-4ee2-4c4e-93d3-9013208af0c3","Type":"ContainerStarted","Data":"cc76418a53af8d2f96ece89ba75860a696b2833d6e203ace19f86a77935f31e9"} Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.195386 5037 generic.go:334] "Generic (PLEG): container finished" podID="51577a49-9ff5-419f-ade6-6b5a9abbf7c0" containerID="979e78ffbe0bc4990e5111e8c7519d935e4c9ab922a1af6d9c17cf6aff56f82d" exitCode=0 Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.195567 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"51577a49-9ff5-419f-ade6-6b5a9abbf7c0","Type":"ContainerDied","Data":"979e78ffbe0bc4990e5111e8c7519d935e4c9ab922a1af6d9c17cf6aff56f82d"} Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.245331 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.546612 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.578830 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-public-tls-certs\") pod \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.578897 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-logs\") pod \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.578984 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-httpd-run\") pod \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.582957 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "51577a49-9ff5-419f-ade6-6b5a9abbf7c0" (UID: "51577a49-9ff5-419f-ade6-6b5a9abbf7c0"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.582985 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-logs" (OuterVolumeSpecName: "logs") pod "51577a49-9ff5-419f-ade6-6b5a9abbf7c0" (UID: "51577a49-9ff5-419f-ade6-6b5a9abbf7c0"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.583137 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-scripts\") pod \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.583195 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-combined-ca-bundle\") pod \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.583246 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.583273 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gslp\" (UniqueName: \"kubernetes.io/projected/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-kube-api-access-7gslp\") pod \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.583308 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-config-data\") pod \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\" (UID: \"51577a49-9ff5-419f-ade6-6b5a9abbf7c0\") " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.583909 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.583924 5037 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.598419 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-scripts" (OuterVolumeSpecName: "scripts") pod "51577a49-9ff5-419f-ade6-6b5a9abbf7c0" (UID: "51577a49-9ff5-419f-ade6-6b5a9abbf7c0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.598443 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "51577a49-9ff5-419f-ade6-6b5a9abbf7c0" (UID: "51577a49-9ff5-419f-ade6-6b5a9abbf7c0"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.606567 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-kube-api-access-7gslp" (OuterVolumeSpecName: "kube-api-access-7gslp") pod "51577a49-9ff5-419f-ade6-6b5a9abbf7c0" (UID: "51577a49-9ff5-419f-ade6-6b5a9abbf7c0"). InnerVolumeSpecName "kube-api-access-7gslp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.641447 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "51577a49-9ff5-419f-ade6-6b5a9abbf7c0" (UID: "51577a49-9ff5-419f-ade6-6b5a9abbf7c0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.680477 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-config-data" (OuterVolumeSpecName: "config-data") pod "51577a49-9ff5-419f-ade6-6b5a9abbf7c0" (UID: "51577a49-9ff5-419f-ade6-6b5a9abbf7c0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.685498 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.685529 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.685557 5037 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.685567 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gslp\" (UniqueName: \"kubernetes.io/projected/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-kube-api-access-7gslp\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.685577 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.686919 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "51577a49-9ff5-419f-ade6-6b5a9abbf7c0" (UID: "51577a49-9ff5-419f-ade6-6b5a9abbf7c0"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.719837 5037 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.787325 5037 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.787350 5037 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51577a49-9ff5-419f-ade6-6b5a9abbf7c0-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.806527 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-2e29-account-create-update-lrvct" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.822045 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-m4hls" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.846428 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5zbnt" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.862050 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-j65gt" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.902051 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-eb2b-account-create-update-92xtm" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.908357 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.908686 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hc458\" (UniqueName: \"kubernetes.io/projected/86867a49-37d5-4289-a31e-8eed1257c87a-kube-api-access-hc458\") pod \"86867a49-37d5-4289-a31e-8eed1257c87a\" (UID: \"86867a49-37d5-4289-a31e-8eed1257c87a\") " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.908754 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2j6zx\" (UniqueName: \"kubernetes.io/projected/56e600a6-1625-4844-9a51-da79b454cd34-kube-api-access-2j6zx\") pod \"56e600a6-1625-4844-9a51-da79b454cd34\" (UID: \"56e600a6-1625-4844-9a51-da79b454cd34\") " Nov 26 14:41:32 crc kubenswrapper[5037]: E1126 14:41:32.908776 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.908787 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56e600a6-1625-4844-9a51-da79b454cd34-operator-scripts\") pod \"56e600a6-1625-4844-9a51-da79b454cd34\" (UID: \"56e600a6-1625-4844-9a51-da79b454cd34\") " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.908966 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ft6mc\" (UniqueName: \"kubernetes.io/projected/b9a76d5f-5242-4fbf-a824-970d9ffcc3ad-kube-api-access-ft6mc\") pod \"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad\" (UID: \"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad\") " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.909017 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86867a49-37d5-4289-a31e-8eed1257c87a-operator-scripts\") pod \"86867a49-37d5-4289-a31e-8eed1257c87a\" (UID: \"86867a49-37d5-4289-a31e-8eed1257c87a\") " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.909044 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/b9a76d5f-5242-4fbf-a824-970d9ffcc3ad-operator-scripts\") pod \"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad\" (UID: \"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad\") " Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.909968 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56e600a6-1625-4844-9a51-da79b454cd34-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "56e600a6-1625-4844-9a51-da79b454cd34" (UID: "56e600a6-1625-4844-9a51-da79b454cd34"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.910326 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9a76d5f-5242-4fbf-a824-970d9ffcc3ad-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b9a76d5f-5242-4fbf-a824-970d9ffcc3ad" (UID: "b9a76d5f-5242-4fbf-a824-970d9ffcc3ad"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.910721 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86867a49-37d5-4289-a31e-8eed1257c87a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "86867a49-37d5-4289-a31e-8eed1257c87a" (UID: "86867a49-37d5-4289-a31e-8eed1257c87a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.913474 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56e600a6-1625-4844-9a51-da79b454cd34-kube-api-access-2j6zx" (OuterVolumeSpecName: "kube-api-access-2j6zx") pod "56e600a6-1625-4844-9a51-da79b454cd34" (UID: "56e600a6-1625-4844-9a51-da79b454cd34"). InnerVolumeSpecName "kube-api-access-2j6zx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.919470 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9a76d5f-5242-4fbf-a824-970d9ffcc3ad-kube-api-access-ft6mc" (OuterVolumeSpecName: "kube-api-access-ft6mc") pod "b9a76d5f-5242-4fbf-a824-970d9ffcc3ad" (UID: "b9a76d5f-5242-4fbf-a824-970d9ffcc3ad"). InnerVolumeSpecName "kube-api-access-ft6mc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:32 crc kubenswrapper[5037]: I1126 14:41:32.919926 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86867a49-37d5-4289-a31e-8eed1257c87a-kube-api-access-hc458" (OuterVolumeSpecName: "kube-api-access-hc458") pod "86867a49-37d5-4289-a31e-8eed1257c87a" (UID: "86867a49-37d5-4289-a31e-8eed1257c87a"). InnerVolumeSpecName "kube-api-access-hc458". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.011885 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab0d1aa1-4e89-4fa1-a06d-a199ed98670f-operator-scripts\") pod \"ab0d1aa1-4e89-4fa1-a06d-a199ed98670f\" (UID: \"ab0d1aa1-4e89-4fa1-a06d-a199ed98670f\") " Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.012351 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdg42\" (UniqueName: \"kubernetes.io/projected/b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79-kube-api-access-vdg42\") pod \"b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79\" (UID: \"b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79\") " Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.012470 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79-operator-scripts\") pod \"b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79\" (UID: \"b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79\") " Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.012535 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpzj7\" (UniqueName: \"kubernetes.io/projected/ab0d1aa1-4e89-4fa1-a06d-a199ed98670f-kube-api-access-kpzj7\") pod \"ab0d1aa1-4e89-4fa1-a06d-a199ed98670f\" (UID: \"ab0d1aa1-4e89-4fa1-a06d-a199ed98670f\") " Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.013877 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79" (UID: "b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.014135 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hc458\" (UniqueName: \"kubernetes.io/projected/86867a49-37d5-4289-a31e-8eed1257c87a-kube-api-access-hc458\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.014214 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2j6zx\" (UniqueName: \"kubernetes.io/projected/56e600a6-1625-4844-9a51-da79b454cd34-kube-api-access-2j6zx\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.014236 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56e600a6-1625-4844-9a51-da79b454cd34-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.014253 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.014270 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ft6mc\" (UniqueName: \"kubernetes.io/projected/b9a76d5f-5242-4fbf-a824-970d9ffcc3ad-kube-api-access-ft6mc\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.014992 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86867a49-37d5-4289-a31e-8eed1257c87a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.015012 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9a76d5f-5242-4fbf-a824-970d9ffcc3ad-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.015149 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab0d1aa1-4e89-4fa1-a06d-a199ed98670f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ab0d1aa1-4e89-4fa1-a06d-a199ed98670f" (UID: "ab0d1aa1-4e89-4fa1-a06d-a199ed98670f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.018517 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab0d1aa1-4e89-4fa1-a06d-a199ed98670f-kube-api-access-kpzj7" (OuterVolumeSpecName: "kube-api-access-kpzj7") pod "ab0d1aa1-4e89-4fa1-a06d-a199ed98670f" (UID: "ab0d1aa1-4e89-4fa1-a06d-a199ed98670f"). InnerVolumeSpecName "kube-api-access-kpzj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.021245 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79-kube-api-access-vdg42" (OuterVolumeSpecName: "kube-api-access-vdg42") pod "b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79" (UID: "b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79"). InnerVolumeSpecName "kube-api-access-vdg42". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.116759 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdg42\" (UniqueName: \"kubernetes.io/projected/b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79-kube-api-access-vdg42\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.117044 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpzj7\" (UniqueName: \"kubernetes.io/projected/ab0d1aa1-4e89-4fa1-a06d-a199ed98670f-kube-api-access-kpzj7\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.117058 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab0d1aa1-4e89-4fa1-a06d-a199ed98670f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.207454 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2e29-account-create-update-lrvct" event={"ID":"56e600a6-1625-4844-9a51-da79b454cd34","Type":"ContainerDied","Data":"b46f5e97ea8d35075c9f2573888c41b3505f1d9189a8d72bbaecfccf51d8e784"} Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.207496 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b46f5e97ea8d35075c9f2573888c41b3505f1d9189a8d72bbaecfccf51d8e784" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.207572 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-2e29-account-create-update-lrvct" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.222908 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5zbnt" event={"ID":"86867a49-37d5-4289-a31e-8eed1257c87a","Type":"ContainerDied","Data":"0de3f3e15fa11c2aaa7a60fc97b70425ee848f36ac666681f5fd19972ceb4284"} Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.222949 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0de3f3e15fa11c2aaa7a60fc97b70425ee848f36ac666681f5fd19972ceb4284" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.223034 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5zbnt" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.228081 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-eb2b-account-create-update-92xtm" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.228114 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-eb2b-account-create-update-92xtm" event={"ID":"ab0d1aa1-4e89-4fa1-a06d-a199ed98670f","Type":"ContainerDied","Data":"7f990c78a7c584a8186f85ac8606b9254f680b0f3bb403c3bcef3257ddcc3c04"} Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.228153 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f990c78a7c584a8186f85ac8606b9254f680b0f3bb403c3bcef3257ddcc3c04" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.231432 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fcecbf16-4ee2-4c4e-93d3-9013208af0c3","Type":"ContainerStarted","Data":"09e8506397472e73770555c2a7ecc07c37b1abcbbc2547e65426ccc640ba0c20"} Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.232813 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-m4hls" event={"ID":"b9a76d5f-5242-4fbf-a824-970d9ffcc3ad","Type":"ContainerDied","Data":"381b099e5d7bcaa2c2e0ade4b111b3c5f117147876f64712660d93511962014a"} Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.232838 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="381b099e5d7bcaa2c2e0ade4b111b3c5f117147876f64712660d93511962014a" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.232898 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-m4hls" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.243531 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-j65gt" event={"ID":"b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79","Type":"ContainerDied","Data":"f01a478a7f8e4f6c00e9322b70050ee6b5c52b4e04e666254402374cc97cb01d"} Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.243566 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f01a478a7f8e4f6c00e9322b70050ee6b5c52b4e04e666254402374cc97cb01d" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.243629 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-j65gt" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.247847 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"51577a49-9ff5-419f-ade6-6b5a9abbf7c0","Type":"ContainerDied","Data":"e8ade553db88109f753c04de8b571fb16f362385cbcb57c5494ff504fdf2bb86"} Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.247904 5037 scope.go:117] "RemoveContainer" containerID="979e78ffbe0bc4990e5111e8c7519d935e4c9ab922a1af6d9c17cf6aff56f82d" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.248093 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.297471 5037 scope.go:117] "RemoveContainer" containerID="5f860a50e8a312cb271e8663accd0b603f1a9197ed83358443cce54a067f67bc" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.343785 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.365659 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.400468 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:41:33 crc kubenswrapper[5037]: E1126 14:41:33.401148 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86867a49-37d5-4289-a31e-8eed1257c87a" containerName="mariadb-database-create" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.401248 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="86867a49-37d5-4289-a31e-8eed1257c87a" containerName="mariadb-database-create" Nov 26 14:41:33 crc kubenswrapper[5037]: E1126 14:41:33.401343 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56e600a6-1625-4844-9a51-da79b454cd34" containerName="mariadb-account-create-update" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.401432 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="56e600a6-1625-4844-9a51-da79b454cd34" containerName="mariadb-account-create-update" Nov 26 14:41:33 crc kubenswrapper[5037]: E1126 14:41:33.401492 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab0d1aa1-4e89-4fa1-a06d-a199ed98670f" containerName="mariadb-account-create-update" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.401541 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab0d1aa1-4e89-4fa1-a06d-a199ed98670f" containerName="mariadb-account-create-update" Nov 26 14:41:33 crc kubenswrapper[5037]: E1126 14:41:33.401604 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79" containerName="mariadb-database-create" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.401656 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79" containerName="mariadb-database-create" Nov 26 14:41:33 crc kubenswrapper[5037]: E1126 14:41:33.401711 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a76d5f-5242-4fbf-a824-970d9ffcc3ad" containerName="mariadb-database-create" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.401766 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a76d5f-5242-4fbf-a824-970d9ffcc3ad" containerName="mariadb-database-create" Nov 26 14:41:33 crc kubenswrapper[5037]: E1126 14:41:33.401823 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51577a49-9ff5-419f-ade6-6b5a9abbf7c0" containerName="glance-httpd" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.401872 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="51577a49-9ff5-419f-ade6-6b5a9abbf7c0" containerName="glance-httpd" Nov 26 14:41:33 crc kubenswrapper[5037]: E1126 14:41:33.401942 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51577a49-9ff5-419f-ade6-6b5a9abbf7c0" containerName="glance-log" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.401999 5037 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="51577a49-9ff5-419f-ade6-6b5a9abbf7c0" containerName="glance-log" Nov 26 14:41:33 crc kubenswrapper[5037]: E1126 14:41:33.402057 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ece5ba73-c8c1-4d22-ac0c-411c8c59e969" containerName="mariadb-account-create-update" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.402115 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ece5ba73-c8c1-4d22-ac0c-411c8c59e969" containerName="mariadb-account-create-update" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.402374 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="ece5ba73-c8c1-4d22-ac0c-411c8c59e969" containerName="mariadb-account-create-update" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.402472 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9a76d5f-5242-4fbf-a824-970d9ffcc3ad" containerName="mariadb-database-create" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.402548 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="51577a49-9ff5-419f-ade6-6b5a9abbf7c0" containerName="glance-log" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.402607 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79" containerName="mariadb-database-create" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.402680 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="86867a49-37d5-4289-a31e-8eed1257c87a" containerName="mariadb-database-create" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.402744 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="51577a49-9ff5-419f-ade6-6b5a9abbf7c0" containerName="glance-httpd" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.402810 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab0d1aa1-4e89-4fa1-a06d-a199ed98670f" containerName="mariadb-account-create-update" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.402863 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="56e600a6-1625-4844-9a51-da79b454cd34" containerName="mariadb-account-create-update" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.403888 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.406242 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.407027 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.407160 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.541219 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.541260 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpqmn\" (UniqueName: \"kubernetes.io/projected/693d1a99-bf33-42ee-adea-2f8ce0f6c002-kube-api-access-jpqmn\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.541330 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.541353 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/693d1a99-bf33-42ee-adea-2f8ce0f6c002-logs\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.541384 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-config-data\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.541404 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/693d1a99-bf33-42ee-adea-2f8ce0f6c002-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.541484 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.541564 5037 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-scripts\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.643445 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-scripts\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.643527 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.643555 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpqmn\" (UniqueName: \"kubernetes.io/projected/693d1a99-bf33-42ee-adea-2f8ce0f6c002-kube-api-access-jpqmn\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.643620 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.643646 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/693d1a99-bf33-42ee-adea-2f8ce0f6c002-logs\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.643684 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-config-data\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.643711 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/693d1a99-bf33-42ee-adea-2f8ce0f6c002-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.643757 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.644239 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/693d1a99-bf33-42ee-adea-2f8ce0f6c002-logs\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.644276 5037 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.644586 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/693d1a99-bf33-42ee-adea-2f8ce0f6c002-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.649211 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-scripts\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.649515 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.650973 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.651191 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-config-data\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.666945 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpqmn\" (UniqueName: \"kubernetes.io/projected/693d1a99-bf33-42ee-adea-2f8ce0f6c002-kube-api-access-jpqmn\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.674789 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.722104 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 14:41:33 crc kubenswrapper[5037]: I1126 14:41:33.958956 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51577a49-9ff5-419f-ade6-6b5a9abbf7c0" path="/var/lib/kubelet/pods/51577a49-9ff5-419f-ade6-6b5a9abbf7c0/volumes" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.079503 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:41:34 crc kubenswrapper[5037]: W1126 14:41:34.103718 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod693d1a99_bf33_42ee_adea_2f8ce0f6c002.slice/crio-8059a452772362925551c8d75ae0d9eebb593c0efe33628aaa1de6f5a5389f15 WatchSource:0}: Error finding container 8059a452772362925551c8d75ae0d9eebb593c0efe33628aaa1de6f5a5389f15: Status 404 returned error can't find the container with id 8059a452772362925551c8d75ae0d9eebb593c0efe33628aaa1de6f5a5389f15 Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.263996 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"693d1a99-bf33-42ee-adea-2f8ce0f6c002","Type":"ContainerStarted","Data":"8059a452772362925551c8d75ae0d9eebb593c0efe33628aaa1de6f5a5389f15"} Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.266839 5037 generic.go:334] "Generic (PLEG): container finished" podID="217b743f-dd2d-4fac-b61e-4ecd43e540d4" containerID="2ade519bc8f930f63a7e98b174c9ebeb57533c95227f89b8a7c1a16fc7313c88" exitCode=0 Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.266878 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"217b743f-dd2d-4fac-b61e-4ecd43e540d4","Type":"ContainerDied","Data":"2ade519bc8f930f63a7e98b174c9ebeb57533c95227f89b8a7c1a16fc7313c88"} Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.272194 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fcecbf16-4ee2-4c4e-93d3-9013208af0c3","Type":"ContainerStarted","Data":"ba857add9d098fdaab6bc8f408d70cd8b862d5862e9845cd361db2db8eb2fdd5"} Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.527253 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.685110 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-combined-ca-bundle\") pod \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.685206 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-scripts\") pod \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.685307 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-config-data\") pod \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.685364 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/217b743f-dd2d-4fac-b61e-4ecd43e540d4-httpd-run\") pod \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.685417 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/217b743f-dd2d-4fac-b61e-4ecd43e540d4-logs\") pod \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.685439 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgjxw\" (UniqueName: \"kubernetes.io/projected/217b743f-dd2d-4fac-b61e-4ecd43e540d4-kube-api-access-rgjxw\") pod \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.685478 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.685508 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-internal-tls-certs\") pod \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\" (UID: \"217b743f-dd2d-4fac-b61e-4ecd43e540d4\") " Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.686273 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/217b743f-dd2d-4fac-b61e-4ecd43e540d4-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "217b743f-dd2d-4fac-b61e-4ecd43e540d4" (UID: "217b743f-dd2d-4fac-b61e-4ecd43e540d4"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.688545 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/217b743f-dd2d-4fac-b61e-4ecd43e540d4-logs" (OuterVolumeSpecName: "logs") pod "217b743f-dd2d-4fac-b61e-4ecd43e540d4" (UID: "217b743f-dd2d-4fac-b61e-4ecd43e540d4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.692384 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-scripts" (OuterVolumeSpecName: "scripts") pod "217b743f-dd2d-4fac-b61e-4ecd43e540d4" (UID: "217b743f-dd2d-4fac-b61e-4ecd43e540d4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.692632 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "217b743f-dd2d-4fac-b61e-4ecd43e540d4" (UID: "217b743f-dd2d-4fac-b61e-4ecd43e540d4"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.693841 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/217b743f-dd2d-4fac-b61e-4ecd43e540d4-kube-api-access-rgjxw" (OuterVolumeSpecName: "kube-api-access-rgjxw") pod "217b743f-dd2d-4fac-b61e-4ecd43e540d4" (UID: "217b743f-dd2d-4fac-b61e-4ecd43e540d4"). InnerVolumeSpecName "kube-api-access-rgjxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.732470 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "217b743f-dd2d-4fac-b61e-4ecd43e540d4" (UID: "217b743f-dd2d-4fac-b61e-4ecd43e540d4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.754908 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-config-data" (OuterVolumeSpecName: "config-data") pod "217b743f-dd2d-4fac-b61e-4ecd43e540d4" (UID: "217b743f-dd2d-4fac-b61e-4ecd43e540d4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.762348 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "217b743f-dd2d-4fac-b61e-4ecd43e540d4" (UID: "217b743f-dd2d-4fac-b61e-4ecd43e540d4"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.797552 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.819156 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.819663 5037 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/217b743f-dd2d-4fac-b61e-4ecd43e540d4-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.820112 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/217b743f-dd2d-4fac-b61e-4ecd43e540d4-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.820136 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgjxw\" (UniqueName: \"kubernetes.io/projected/217b743f-dd2d-4fac-b61e-4ecd43e540d4-kube-api-access-rgjxw\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.820182 5037 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.820196 5037 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.820206 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/217b743f-dd2d-4fac-b61e-4ecd43e540d4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.870601 5037 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 26 14:41:34 crc kubenswrapper[5037]: I1126 14:41:34.922252 5037 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.282738 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"693d1a99-bf33-42ee-adea-2f8ce0f6c002","Type":"ContainerStarted","Data":"b49e79776c4c1720c6692646b9ec69e22007400cd025b41070ff9a874d805f29"} Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.284971 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"217b743f-dd2d-4fac-b61e-4ecd43e540d4","Type":"ContainerDied","Data":"3f62c38a0803b969f4015fa7b32b1291a4cd52b39b14c4d3f6c3eba3640d6b50"} Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.285050 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.285054 5037 scope.go:117] "RemoveContainer" containerID="2ade519bc8f930f63a7e98b174c9ebeb57533c95227f89b8a7c1a16fc7313c88" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.292616 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fcecbf16-4ee2-4c4e-93d3-9013208af0c3","Type":"ContainerStarted","Data":"6239f1a19694ce1098dcaa14ee229609a56d55d79549ebe899c17ddfb2d783c2"} Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.292843 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="ceilometer-central-agent" containerID="cri-o://cc76418a53af8d2f96ece89ba75860a696b2833d6e203ace19f86a77935f31e9" gracePeriod=30 Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.292925 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.292983 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="sg-core" containerID="cri-o://ba857add9d098fdaab6bc8f408d70cd8b862d5862e9845cd361db2db8eb2fdd5" gracePeriod=30 Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.293012 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="proxy-httpd" containerID="cri-o://6239f1a19694ce1098dcaa14ee229609a56d55d79549ebe899c17ddfb2d783c2" gracePeriod=30 Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.293023 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="ceilometer-notification-agent" containerID="cri-o://09e8506397472e73770555c2a7ecc07c37b1abcbbc2547e65426ccc640ba0c20" gracePeriod=30 Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.327444 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.5714962510000001 podStartE2EDuration="5.327419935s" podCreationTimestamp="2025-11-26 14:41:30 +0000 UTC" firstStartedPulling="2025-11-26 14:41:31.098074816 +0000 UTC m=+1557.894845000" lastFinishedPulling="2025-11-26 14:41:34.8539985 +0000 UTC m=+1561.650768684" observedRunningTime="2025-11-26 14:41:35.317256197 +0000 UTC m=+1562.114026391" watchObservedRunningTime="2025-11-26 14:41:35.327419935 +0000 UTC m=+1562.124190119" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.329313 5037 scope.go:117] "RemoveContainer" containerID="e45352dde5f5b9a92cfe20673f8c8ed00298f5bee8f04018fad41a90935adb7e" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.347690 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.364339 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.380636 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:41:35 crc kubenswrapper[5037]: E1126 14:41:35.381077 5037 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="217b743f-dd2d-4fac-b61e-4ecd43e540d4" containerName="glance-httpd" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.381099 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="217b743f-dd2d-4fac-b61e-4ecd43e540d4" containerName="glance-httpd" Nov 26 14:41:35 crc kubenswrapper[5037]: E1126 14:41:35.381137 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="217b743f-dd2d-4fac-b61e-4ecd43e540d4" containerName="glance-log" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.381145 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="217b743f-dd2d-4fac-b61e-4ecd43e540d4" containerName="glance-log" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.381437 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="217b743f-dd2d-4fac-b61e-4ecd43e540d4" containerName="glance-log" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.381471 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="217b743f-dd2d-4fac-b61e-4ecd43e540d4" containerName="glance-httpd" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.417995 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.418086 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.425899 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.426794 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.541339 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.541683 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.541731 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zx96r\" (UniqueName: \"kubernetes.io/projected/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-kube-api-access-zx96r\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.541762 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.541821 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-logs\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.541846 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.541867 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.541906 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.643821 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.643924 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-logs\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.643979 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.644031 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.644498 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-logs\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.644656 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.644720 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.644766 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.644832 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zx96r\" (UniqueName: \"kubernetes.io/projected/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-kube-api-access-zx96r\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.645030 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.645095 5037 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.652214 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.654328 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.666032 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.677828 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zx96r\" (UniqueName: \"kubernetes.io/projected/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-kube-api-access-zx96r\") pod 
\"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.686082 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.690785 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") " pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.871003 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 14:41:35 crc kubenswrapper[5037]: I1126 14:41:35.923220 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="217b743f-dd2d-4fac-b61e-4ecd43e540d4" path="/var/lib/kubelet/pods/217b743f-dd2d-4fac-b61e-4ecd43e540d4/volumes" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.304743 5037 generic.go:334] "Generic (PLEG): container finished" podID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerID="6239f1a19694ce1098dcaa14ee229609a56d55d79549ebe899c17ddfb2d783c2" exitCode=0 Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.304975 5037 generic.go:334] "Generic (PLEG): container finished" podID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerID="ba857add9d098fdaab6bc8f408d70cd8b862d5862e9845cd361db2db8eb2fdd5" exitCode=2 Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.304984 5037 generic.go:334] "Generic (PLEG): container finished" podID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerID="09e8506397472e73770555c2a7ecc07c37b1abcbbc2547e65426ccc640ba0c20" exitCode=0 Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.305013 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fcecbf16-4ee2-4c4e-93d3-9013208af0c3","Type":"ContainerDied","Data":"6239f1a19694ce1098dcaa14ee229609a56d55d79549ebe899c17ddfb2d783c2"} Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.305037 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fcecbf16-4ee2-4c4e-93d3-9013208af0c3","Type":"ContainerDied","Data":"ba857add9d098fdaab6bc8f408d70cd8b862d5862e9845cd361db2db8eb2fdd5"} Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.305046 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fcecbf16-4ee2-4c4e-93d3-9013208af0c3","Type":"ContainerDied","Data":"09e8506397472e73770555c2a7ecc07c37b1abcbbc2547e65426ccc640ba0c20"} Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.306229 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"693d1a99-bf33-42ee-adea-2f8ce0f6c002","Type":"ContainerStarted","Data":"69f2b0b56cf2f3be40f2a859173b22b913b09b1aec2185348206ae5ef68d4747"} Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.372916 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.3728979629999998 podStartE2EDuration="3.372897963s" 
podCreationTimestamp="2025-11-26 14:41:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:41:36.367997964 +0000 UTC m=+1563.164768148" watchObservedRunningTime="2025-11-26 14:41:36.372897963 +0000 UTC m=+1563.169668147" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.474220 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.805277 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5kqgs"] Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.807946 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.812104 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-dvt6s" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.812307 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.812346 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.857620 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5kqgs"] Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.870268 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcwcf\" (UniqueName: \"kubernetes.io/projected/42522a9f-0861-47fb-9d66-65039590aeaf-kube-api-access-jcwcf\") pod \"nova-cell0-conductor-db-sync-5kqgs\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.870372 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5kqgs\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.870474 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-scripts\") pod \"nova-cell0-conductor-db-sync-5kqgs\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.870505 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-config-data\") pod \"nova-cell0-conductor-db-sync-5kqgs\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.972333 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-scripts\") pod \"nova-cell0-conductor-db-sync-5kqgs\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " 
pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.972465 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-config-data\") pod \"nova-cell0-conductor-db-sync-5kqgs\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.972541 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcwcf\" (UniqueName: \"kubernetes.io/projected/42522a9f-0861-47fb-9d66-65039590aeaf-kube-api-access-jcwcf\") pod \"nova-cell0-conductor-db-sync-5kqgs\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.972599 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5kqgs\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.978123 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-config-data\") pod \"nova-cell0-conductor-db-sync-5kqgs\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.978961 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5kqgs\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.979157 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-scripts\") pod \"nova-cell0-conductor-db-sync-5kqgs\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:36 crc kubenswrapper[5037]: I1126 14:41:36.989939 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcwcf\" (UniqueName: \"kubernetes.io/projected/42522a9f-0861-47fb-9d66-65039590aeaf-kube-api-access-jcwcf\") pod \"nova-cell0-conductor-db-sync-5kqgs\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:37 crc kubenswrapper[5037]: I1126 14:41:37.130769 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:41:37 crc kubenswrapper[5037]: I1126 14:41:37.376255 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e","Type":"ContainerStarted","Data":"7193d230cf98d6ea21211158364885315519bd51ee3bfb69a0d77702bb8f27cf"} Nov 26 14:41:37 crc kubenswrapper[5037]: I1126 14:41:37.376625 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e","Type":"ContainerStarted","Data":"88db420c03f6896e0ed120d5226d23a655f6efd3fabff26062f101f8dadad54a"} Nov 26 14:41:37 crc kubenswrapper[5037]: I1126 14:41:37.607323 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5kqgs"] Nov 26 14:41:37 crc kubenswrapper[5037]: W1126 14:41:37.615457 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42522a9f_0861_47fb_9d66_65039590aeaf.slice/crio-a755d85b4823ac69b1ba9de5066a0a9e80ad00c311b7ef12369cdcede79927e3 WatchSource:0}: Error finding container a755d85b4823ac69b1ba9de5066a0a9e80ad00c311b7ef12369cdcede79927e3: Status 404 returned error can't find the container with id a755d85b4823ac69b1ba9de5066a0a9e80ad00c311b7ef12369cdcede79927e3 Nov 26 14:41:38 crc kubenswrapper[5037]: I1126 14:41:38.383934 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5kqgs" event={"ID":"42522a9f-0861-47fb-9d66-65039590aeaf","Type":"ContainerStarted","Data":"a755d85b4823ac69b1ba9de5066a0a9e80ad00c311b7ef12369cdcede79927e3"} Nov 26 14:41:38 crc kubenswrapper[5037]: I1126 14:41:38.386470 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e","Type":"ContainerStarted","Data":"38c16d94870f4c7e1e940ce264a006c31e02b0edcd3c7f37d2a4b79a7684dec0"} Nov 26 14:41:38 crc kubenswrapper[5037]: I1126 14:41:38.410204 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.41018859 podStartE2EDuration="3.41018859s" podCreationTimestamp="2025-11-26 14:41:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:41:38.406231983 +0000 UTC m=+1565.203002177" watchObservedRunningTime="2025-11-26 14:41:38.41018859 +0000 UTC m=+1565.206958774" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.138734 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.218510 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-scripts\") pod \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.218604 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-combined-ca-bundle\") pod \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.218659 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-run-httpd\") pod \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.218699 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92jpf\" (UniqueName: \"kubernetes.io/projected/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-kube-api-access-92jpf\") pod \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.218719 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-config-data\") pod \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.218857 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-log-httpd\") pod \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.218916 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-sg-core-conf-yaml\") pod \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\" (UID: \"fcecbf16-4ee2-4c4e-93d3-9013208af0c3\") " Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.219397 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "fcecbf16-4ee2-4c4e-93d3-9013208af0c3" (UID: "fcecbf16-4ee2-4c4e-93d3-9013208af0c3"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.219479 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "fcecbf16-4ee2-4c4e-93d3-9013208af0c3" (UID: "fcecbf16-4ee2-4c4e-93d3-9013208af0c3"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.219852 5037 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.219872 5037 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.224474 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-kube-api-access-92jpf" (OuterVolumeSpecName: "kube-api-access-92jpf") pod "fcecbf16-4ee2-4c4e-93d3-9013208af0c3" (UID: "fcecbf16-4ee2-4c4e-93d3-9013208af0c3"). InnerVolumeSpecName "kube-api-access-92jpf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.245495 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-scripts" (OuterVolumeSpecName: "scripts") pod "fcecbf16-4ee2-4c4e-93d3-9013208af0c3" (UID: "fcecbf16-4ee2-4c4e-93d3-9013208af0c3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.246484 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "fcecbf16-4ee2-4c4e-93d3-9013208af0c3" (UID: "fcecbf16-4ee2-4c4e-93d3-9013208af0c3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.308099 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fcecbf16-4ee2-4c4e-93d3-9013208af0c3" (UID: "fcecbf16-4ee2-4c4e-93d3-9013208af0c3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.322168 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.322205 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.322242 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92jpf\" (UniqueName: \"kubernetes.io/projected/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-kube-api-access-92jpf\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.322253 5037 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.329173 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-config-data" (OuterVolumeSpecName: "config-data") pod "fcecbf16-4ee2-4c4e-93d3-9013208af0c3" (UID: "fcecbf16-4ee2-4c4e-93d3-9013208af0c3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.400358 5037 generic.go:334] "Generic (PLEG): container finished" podID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerID="cc76418a53af8d2f96ece89ba75860a696b2833d6e203ace19f86a77935f31e9" exitCode=0 Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.402861 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.404652 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fcecbf16-4ee2-4c4e-93d3-9013208af0c3","Type":"ContainerDied","Data":"cc76418a53af8d2f96ece89ba75860a696b2833d6e203ace19f86a77935f31e9"} Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.404694 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fcecbf16-4ee2-4c4e-93d3-9013208af0c3","Type":"ContainerDied","Data":"1bd028e209776c80516c3bab85d08d487d8052e74973ef25a86ab924ac7f95c9"} Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.404711 5037 scope.go:117] "RemoveContainer" containerID="6239f1a19694ce1098dcaa14ee229609a56d55d79549ebe899c17ddfb2d783c2" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.424345 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fcecbf16-4ee2-4c4e-93d3-9013208af0c3-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.427343 5037 scope.go:117] "RemoveContainer" containerID="ba857add9d098fdaab6bc8f408d70cd8b862d5862e9845cd361db2db8eb2fdd5" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.448802 5037 scope.go:117] "RemoveContainer" containerID="09e8506397472e73770555c2a7ecc07c37b1abcbbc2547e65426ccc640ba0c20" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.453896 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.466821 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.477328 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:39 crc kubenswrapper[5037]: E1126 14:41:39.477803 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="proxy-httpd" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.477821 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="proxy-httpd" Nov 26 14:41:39 crc kubenswrapper[5037]: E1126 14:41:39.477855 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="ceilometer-central-agent" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.477863 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="ceilometer-central-agent" Nov 26 14:41:39 crc kubenswrapper[5037]: E1126 14:41:39.477872 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="sg-core" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.477879 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="sg-core" Nov 26 14:41:39 crc kubenswrapper[5037]: E1126 14:41:39.477900 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="ceilometer-notification-agent" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.477908 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="ceilometer-notification-agent" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 
14:41:39.478128 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="ceilometer-central-agent" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.478145 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="ceilometer-notification-agent" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.478159 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="sg-core" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.478167 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" containerName="proxy-httpd" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.480240 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.493364 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.494831 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.495099 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.498138 5037 scope.go:117] "RemoveContainer" containerID="cc76418a53af8d2f96ece89ba75860a696b2833d6e203ace19f86a77935f31e9" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.529655 5037 scope.go:117] "RemoveContainer" containerID="6239f1a19694ce1098dcaa14ee229609a56d55d79549ebe899c17ddfb2d783c2" Nov 26 14:41:39 crc kubenswrapper[5037]: E1126 14:41:39.530389 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6239f1a19694ce1098dcaa14ee229609a56d55d79549ebe899c17ddfb2d783c2\": container with ID starting with 6239f1a19694ce1098dcaa14ee229609a56d55d79549ebe899c17ddfb2d783c2 not found: ID does not exist" containerID="6239f1a19694ce1098dcaa14ee229609a56d55d79549ebe899c17ddfb2d783c2" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.530422 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6239f1a19694ce1098dcaa14ee229609a56d55d79549ebe899c17ddfb2d783c2"} err="failed to get container status \"6239f1a19694ce1098dcaa14ee229609a56d55d79549ebe899c17ddfb2d783c2\": rpc error: code = NotFound desc = could not find container \"6239f1a19694ce1098dcaa14ee229609a56d55d79549ebe899c17ddfb2d783c2\": container with ID starting with 6239f1a19694ce1098dcaa14ee229609a56d55d79549ebe899c17ddfb2d783c2 not found: ID does not exist" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.530443 5037 scope.go:117] "RemoveContainer" containerID="ba857add9d098fdaab6bc8f408d70cd8b862d5862e9845cd361db2db8eb2fdd5" Nov 26 14:41:39 crc kubenswrapper[5037]: E1126 14:41:39.530742 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba857add9d098fdaab6bc8f408d70cd8b862d5862e9845cd361db2db8eb2fdd5\": container with ID starting with ba857add9d098fdaab6bc8f408d70cd8b862d5862e9845cd361db2db8eb2fdd5 not found: ID does not exist" containerID="ba857add9d098fdaab6bc8f408d70cd8b862d5862e9845cd361db2db8eb2fdd5" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.530764 5037 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba857add9d098fdaab6bc8f408d70cd8b862d5862e9845cd361db2db8eb2fdd5"} err="failed to get container status \"ba857add9d098fdaab6bc8f408d70cd8b862d5862e9845cd361db2db8eb2fdd5\": rpc error: code = NotFound desc = could not find container \"ba857add9d098fdaab6bc8f408d70cd8b862d5862e9845cd361db2db8eb2fdd5\": container with ID starting with ba857add9d098fdaab6bc8f408d70cd8b862d5862e9845cd361db2db8eb2fdd5 not found: ID does not exist" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.530778 5037 scope.go:117] "RemoveContainer" containerID="09e8506397472e73770555c2a7ecc07c37b1abcbbc2547e65426ccc640ba0c20" Nov 26 14:41:39 crc kubenswrapper[5037]: E1126 14:41:39.530953 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09e8506397472e73770555c2a7ecc07c37b1abcbbc2547e65426ccc640ba0c20\": container with ID starting with 09e8506397472e73770555c2a7ecc07c37b1abcbbc2547e65426ccc640ba0c20 not found: ID does not exist" containerID="09e8506397472e73770555c2a7ecc07c37b1abcbbc2547e65426ccc640ba0c20" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.530973 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09e8506397472e73770555c2a7ecc07c37b1abcbbc2547e65426ccc640ba0c20"} err="failed to get container status \"09e8506397472e73770555c2a7ecc07c37b1abcbbc2547e65426ccc640ba0c20\": rpc error: code = NotFound desc = could not find container \"09e8506397472e73770555c2a7ecc07c37b1abcbbc2547e65426ccc640ba0c20\": container with ID starting with 09e8506397472e73770555c2a7ecc07c37b1abcbbc2547e65426ccc640ba0c20 not found: ID does not exist" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.530985 5037 scope.go:117] "RemoveContainer" containerID="cc76418a53af8d2f96ece89ba75860a696b2833d6e203ace19f86a77935f31e9" Nov 26 14:41:39 crc kubenswrapper[5037]: E1126 14:41:39.531167 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc76418a53af8d2f96ece89ba75860a696b2833d6e203ace19f86a77935f31e9\": container with ID starting with cc76418a53af8d2f96ece89ba75860a696b2833d6e203ace19f86a77935f31e9 not found: ID does not exist" containerID="cc76418a53af8d2f96ece89ba75860a696b2833d6e203ace19f86a77935f31e9" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.531217 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc76418a53af8d2f96ece89ba75860a696b2833d6e203ace19f86a77935f31e9"} err="failed to get container status \"cc76418a53af8d2f96ece89ba75860a696b2833d6e203ace19f86a77935f31e9\": rpc error: code = NotFound desc = could not find container \"cc76418a53af8d2f96ece89ba75860a696b2833d6e203ace19f86a77935f31e9\": container with ID starting with cc76418a53af8d2f96ece89ba75860a696b2833d6e203ace19f86a77935f31e9 not found: ID does not exist" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.631241 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.631380 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" 
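[Editor's note] The `RemoveContainer` / `NotFound` pairs above are the usual idempotent-delete pattern rather than a fault: by the time the kubelet re-checks container status, CRI-O has already forgotten the container, and a NotFound answer means there is nothing left to remove. A sketch of that check; only the `status`/`codes` imports are real gRPC API, the surrounding function is the editor's illustration:

// Sketch: treat gRPC NotFound from the runtime as "already removed",
// which makes container deletion safe to retry.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func removeContainer(id string, containerStatus func(id string) error) error {
	if err := containerStatus(id); err != nil {
		if status.Code(err) == codes.NotFound {
			return nil // container already gone; deletion is complete
		}
		return fmt.Errorf("failed to get container status %q: %w", id, err)
	}
	// ... would stop and remove the container here ...
	return nil
}

func main() {
	notFound := func(id string) error {
		return status.Error(codes.NotFound, "could not find container "+id)
	}
	fmt.Println(removeContainer("6239f1a1", notFound)) // prints <nil>
}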
(UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-scripts\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.631434 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-config-data\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.631459 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49n6v\" (UniqueName: \"kubernetes.io/projected/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-kube-api-access-49n6v\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.631598 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-log-httpd\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.631666 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-run-httpd\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.631684 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.733096 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.733163 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-scripts\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.733198 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-config-data\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.733225 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49n6v\" (UniqueName: \"kubernetes.io/projected/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-kube-api-access-49n6v\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.733308 5037 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-log-httpd\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.733351 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-run-httpd\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.733374 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.734234 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-log-httpd\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.734428 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-run-httpd\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.737006 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.737301 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-scripts\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.738984 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-config-data\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.742382 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.751336 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49n6v\" (UniqueName: \"kubernetes.io/projected/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-kube-api-access-49n6v\") pod \"ceilometer-0\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.811922 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:41:39 crc kubenswrapper[5037]: I1126 14:41:39.975668 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fcecbf16-4ee2-4c4e-93d3-9013208af0c3" path="/var/lib/kubelet/pods/fcecbf16-4ee2-4c4e-93d3-9013208af0c3/volumes" Nov 26 14:41:40 crc kubenswrapper[5037]: I1126 14:41:40.311925 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:40 crc kubenswrapper[5037]: I1126 14:41:40.413716 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59dc0d33-5ee1-46e0-b0c4-2b91075497bd","Type":"ContainerStarted","Data":"5b20527e0264ce85475ad5f000593ca875df89f8a8246949c9433f212481a4d2"} Nov 26 14:41:40 crc kubenswrapper[5037]: I1126 14:41:40.586990 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:41 crc kubenswrapper[5037]: I1126 14:41:41.425354 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59dc0d33-5ee1-46e0-b0c4-2b91075497bd","Type":"ContainerStarted","Data":"9b38de542599782271cf404e7072285c47c83e55039619b9adb0ef3caa9bc5e4"} Nov 26 14:41:43 crc kubenswrapper[5037]: I1126 14:41:43.723412 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 14:41:43 crc kubenswrapper[5037]: I1126 14:41:43.723995 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 14:41:43 crc kubenswrapper[5037]: I1126 14:41:43.760913 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 14:41:43 crc kubenswrapper[5037]: I1126 14:41:43.768686 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 14:41:44 crc kubenswrapper[5037]: I1126 14:41:44.458468 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 14:41:44 crc kubenswrapper[5037]: I1126 14:41:44.458595 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 14:41:45 crc kubenswrapper[5037]: I1126 14:41:45.871334 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 14:41:45 crc kubenswrapper[5037]: I1126 14:41:45.871753 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 14:41:45 crc kubenswrapper[5037]: I1126 14:41:45.902107 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 14:41:45 crc kubenswrapper[5037]: I1126 14:41:45.920056 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 14:41:46 crc kubenswrapper[5037]: I1126 14:41:46.483168 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59dc0d33-5ee1-46e0-b0c4-2b91075497bd","Type":"ContainerStarted","Data":"2bb75eba3ba7b08cae7a86bf78560b6f05aad0d9bc4d47b9eadfe65e1d5db1b3"} Nov 26 14:41:46 crc kubenswrapper[5037]: I1126 14:41:46.484683 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5kqgs" 
event={"ID":"42522a9f-0861-47fb-9d66-65039590aeaf","Type":"ContainerStarted","Data":"4dc64289955c86f3b4c372cac08b43c9e8623cbbe4573778b4e74b03bcc33650"} Nov 26 14:41:46 crc kubenswrapper[5037]: I1126 14:41:46.484764 5037 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 14:41:46 crc kubenswrapper[5037]: I1126 14:41:46.484780 5037 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 14:41:46 crc kubenswrapper[5037]: I1126 14:41:46.485277 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 14:41:46 crc kubenswrapper[5037]: I1126 14:41:46.485603 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 14:41:46 crc kubenswrapper[5037]: I1126 14:41:46.509121 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-5kqgs" podStartSLOduration=2.207297527 podStartE2EDuration="10.509104408s" podCreationTimestamp="2025-11-26 14:41:36 +0000 UTC" firstStartedPulling="2025-11-26 14:41:37.618831284 +0000 UTC m=+1564.415601458" lastFinishedPulling="2025-11-26 14:41:45.920638155 +0000 UTC m=+1572.717408339" observedRunningTime="2025-11-26 14:41:46.500427317 +0000 UTC m=+1573.297197501" watchObservedRunningTime="2025-11-26 14:41:46.509104408 +0000 UTC m=+1573.305874582" Nov 26 14:41:46 crc kubenswrapper[5037]: I1126 14:41:46.675423 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 14:41:46 crc kubenswrapper[5037]: I1126 14:41:46.686611 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 14:41:46 crc kubenswrapper[5037]: I1126 14:41:46.908465 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:41:46 crc kubenswrapper[5037]: E1126 14:41:46.908750 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:41:47 crc kubenswrapper[5037]: I1126 14:41:47.496559 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59dc0d33-5ee1-46e0-b0c4-2b91075497bd","Type":"ContainerStarted","Data":"7715aab728710ea0bd793247d4785d1b75ed16c0aaf00ab09eeaffe873acac4f"} Nov 26 14:41:48 crc kubenswrapper[5037]: I1126 14:41:48.465172 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 14:41:48 crc kubenswrapper[5037]: I1126 14:41:48.512532 5037 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 14:41:48 crc kubenswrapper[5037]: I1126 14:41:48.513462 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="ceilometer-central-agent" containerID="cri-o://9b38de542599782271cf404e7072285c47c83e55039619b9adb0ef3caa9bc5e4" gracePeriod=30 Nov 26 14:41:48 crc kubenswrapper[5037]: I1126 14:41:48.513762 5037 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/ceilometer-0" event={"ID":"59dc0d33-5ee1-46e0-b0c4-2b91075497bd","Type":"ContainerStarted","Data":"6d5347bc2cf2f7e97104fd17fac25236fbc6063874712967a96d97396a354cb1"} Nov 26 14:41:48 crc kubenswrapper[5037]: I1126 14:41:48.514337 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 14:41:48 crc kubenswrapper[5037]: I1126 14:41:48.514656 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="proxy-httpd" containerID="cri-o://6d5347bc2cf2f7e97104fd17fac25236fbc6063874712967a96d97396a354cb1" gracePeriod=30 Nov 26 14:41:48 crc kubenswrapper[5037]: I1126 14:41:48.514725 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="sg-core" containerID="cri-o://7715aab728710ea0bd793247d4785d1b75ed16c0aaf00ab09eeaffe873acac4f" gracePeriod=30 Nov 26 14:41:48 crc kubenswrapper[5037]: I1126 14:41:48.514765 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="ceilometer-notification-agent" containerID="cri-o://2bb75eba3ba7b08cae7a86bf78560b6f05aad0d9bc4d47b9eadfe65e1d5db1b3" gracePeriod=30 Nov 26 14:41:48 crc kubenswrapper[5037]: I1126 14:41:48.544684 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.852682095 podStartE2EDuration="9.544663632s" podCreationTimestamp="2025-11-26 14:41:39 +0000 UTC" firstStartedPulling="2025-11-26 14:41:40.333582746 +0000 UTC m=+1567.130352930" lastFinishedPulling="2025-11-26 14:41:48.025564283 +0000 UTC m=+1574.822334467" observedRunningTime="2025-11-26 14:41:48.540400168 +0000 UTC m=+1575.337170372" watchObservedRunningTime="2025-11-26 14:41:48.544663632 +0000 UTC m=+1575.341433826" Nov 26 14:41:48 crc kubenswrapper[5037]: I1126 14:41:48.617812 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 14:41:48 crc kubenswrapper[5037]: E1126 14:41:48.759098 5037 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59dc0d33_5ee1_46e0_b0c4_2b91075497bd.slice/crio-7715aab728710ea0bd793247d4785d1b75ed16c0aaf00ab09eeaffe873acac4f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59dc0d33_5ee1_46e0_b0c4_2b91075497bd.slice/crio-conmon-7715aab728710ea0bd793247d4785d1b75ed16c0aaf00ab09eeaffe873acac4f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59dc0d33_5ee1_46e0_b0c4_2b91075497bd.slice/crio-6d5347bc2cf2f7e97104fd17fac25236fbc6063874712967a96d97396a354cb1.scope\": RecentStats: unable to find data in memory cache]" Nov 26 14:41:49 crc kubenswrapper[5037]: I1126 14:41:49.525027 5037 generic.go:334] "Generic (PLEG): container finished" podID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerID="6d5347bc2cf2f7e97104fd17fac25236fbc6063874712967a96d97396a354cb1" exitCode=0 Nov 26 14:41:49 crc kubenswrapper[5037]: I1126 14:41:49.525318 5037 generic.go:334] "Generic (PLEG): container finished" podID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" 
containerID="7715aab728710ea0bd793247d4785d1b75ed16c0aaf00ab09eeaffe873acac4f" exitCode=2 Nov 26 14:41:49 crc kubenswrapper[5037]: I1126 14:41:49.525095 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59dc0d33-5ee1-46e0-b0c4-2b91075497bd","Type":"ContainerDied","Data":"6d5347bc2cf2f7e97104fd17fac25236fbc6063874712967a96d97396a354cb1"} Nov 26 14:41:49 crc kubenswrapper[5037]: I1126 14:41:49.525423 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59dc0d33-5ee1-46e0-b0c4-2b91075497bd","Type":"ContainerDied","Data":"7715aab728710ea0bd793247d4785d1b75ed16c0aaf00ab09eeaffe873acac4f"} Nov 26 14:41:50 crc kubenswrapper[5037]: I1126 14:41:50.542360 5037 generic.go:334] "Generic (PLEG): container finished" podID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerID="2bb75eba3ba7b08cae7a86bf78560b6f05aad0d9bc4d47b9eadfe65e1d5db1b3" exitCode=0 Nov 26 14:41:50 crc kubenswrapper[5037]: I1126 14:41:50.542429 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59dc0d33-5ee1-46e0-b0c4-2b91075497bd","Type":"ContainerDied","Data":"2bb75eba3ba7b08cae7a86bf78560b6f05aad0d9bc4d47b9eadfe65e1d5db1b3"} Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.633494 5037 generic.go:334] "Generic (PLEG): container finished" podID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerID="9b38de542599782271cf404e7072285c47c83e55039619b9adb0ef3caa9bc5e4" exitCode=0 Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.633934 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59dc0d33-5ee1-46e0-b0c4-2b91075497bd","Type":"ContainerDied","Data":"9b38de542599782271cf404e7072285c47c83e55039619b9adb0ef3caa9bc5e4"} Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.793531 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.893715 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-run-httpd\") pod \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.893849 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-combined-ca-bundle\") pod \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.893935 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-sg-core-conf-yaml\") pod \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.893962 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-log-httpd\") pod \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.894037 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49n6v\" (UniqueName: \"kubernetes.io/projected/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-kube-api-access-49n6v\") pod \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.894089 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-config-data\") pod \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.894157 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-scripts\") pod \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\" (UID: \"59dc0d33-5ee1-46e0-b0c4-2b91075497bd\") " Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.894261 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "59dc0d33-5ee1-46e0-b0c4-2b91075497bd" (UID: "59dc0d33-5ee1-46e0-b0c4-2b91075497bd"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.894418 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "59dc0d33-5ee1-46e0-b0c4-2b91075497bd" (UID: "59dc0d33-5ee1-46e0-b0c4-2b91075497bd"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.894609 5037 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.894627 5037 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.899441 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-scripts" (OuterVolumeSpecName: "scripts") pod "59dc0d33-5ee1-46e0-b0c4-2b91075497bd" (UID: "59dc0d33-5ee1-46e0-b0c4-2b91075497bd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.904502 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-kube-api-access-49n6v" (OuterVolumeSpecName: "kube-api-access-49n6v") pod "59dc0d33-5ee1-46e0-b0c4-2b91075497bd" (UID: "59dc0d33-5ee1-46e0-b0c4-2b91075497bd"). InnerVolumeSpecName "kube-api-access-49n6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.920785 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "59dc0d33-5ee1-46e0-b0c4-2b91075497bd" (UID: "59dc0d33-5ee1-46e0-b0c4-2b91075497bd"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.964440 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "59dc0d33-5ee1-46e0-b0c4-2b91075497bd" (UID: "59dc0d33-5ee1-46e0-b0c4-2b91075497bd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.990626 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-config-data" (OuterVolumeSpecName: "config-data") pod "59dc0d33-5ee1-46e0-b0c4-2b91075497bd" (UID: "59dc0d33-5ee1-46e0-b0c4-2b91075497bd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.996823 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49n6v\" (UniqueName: \"kubernetes.io/projected/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-kube-api-access-49n6v\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.996857 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.996866 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.996876 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:52 crc kubenswrapper[5037]: I1126 14:41:52.996886 5037 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59dc0d33-5ee1-46e0-b0c4-2b91075497bd-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.644684 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59dc0d33-5ee1-46e0-b0c4-2b91075497bd","Type":"ContainerDied","Data":"5b20527e0264ce85475ad5f000593ca875df89f8a8246949c9433f212481a4d2"} Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.644743 5037 scope.go:117] "RemoveContainer" containerID="6d5347bc2cf2f7e97104fd17fac25236fbc6063874712967a96d97396a354cb1" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.644744 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.667324 5037 scope.go:117] "RemoveContainer" containerID="7715aab728710ea0bd793247d4785d1b75ed16c0aaf00ab09eeaffe873acac4f" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.687581 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.703099 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.708569 5037 scope.go:117] "RemoveContainer" containerID="2bb75eba3ba7b08cae7a86bf78560b6f05aad0d9bc4d47b9eadfe65e1d5db1b3" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.712257 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:53 crc kubenswrapper[5037]: E1126 14:41:53.712621 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="sg-core" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.712635 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="sg-core" Nov 26 14:41:53 crc kubenswrapper[5037]: E1126 14:41:53.712645 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="proxy-httpd" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.712651 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="proxy-httpd" Nov 26 14:41:53 crc kubenswrapper[5037]: E1126 14:41:53.712678 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="ceilometer-notification-agent" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.712687 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="ceilometer-notification-agent" Nov 26 14:41:53 crc kubenswrapper[5037]: E1126 14:41:53.712696 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="ceilometer-central-agent" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.712701 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="ceilometer-central-agent" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.712885 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="ceilometer-notification-agent" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.712907 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="sg-core" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.712923 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="ceilometer-central-agent" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.712943 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" containerName="proxy-httpd" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.715497 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.719749 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.719812 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.730155 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.767615 5037 scope.go:117] "RemoveContainer" containerID="9b38de542599782271cf404e7072285c47c83e55039619b9adb0ef3caa9bc5e4" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.811078 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.811168 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d968815-6164-4fa7-83d2-035b696f148d-log-httpd\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.811327 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.811455 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-scripts\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.811506 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-config-data\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.811604 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wnz9\" (UniqueName: \"kubernetes.io/projected/9d968815-6164-4fa7-83d2-035b696f148d-kube-api-access-5wnz9\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.811634 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d968815-6164-4fa7-83d2-035b696f148d-run-httpd\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.913849 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-config-data\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.913970 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wnz9\" (UniqueName: \"kubernetes.io/projected/9d968815-6164-4fa7-83d2-035b696f148d-kube-api-access-5wnz9\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.914004 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d968815-6164-4fa7-83d2-035b696f148d-run-httpd\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.914030 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.914076 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d968815-6164-4fa7-83d2-035b696f148d-log-httpd\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.914110 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.914151 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-scripts\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.914917 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d968815-6164-4fa7-83d2-035b696f148d-run-httpd\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.915511 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d968815-6164-4fa7-83d2-035b696f148d-log-httpd\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.918905 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.919224 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-config-data\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.919365 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-scripts\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.920699 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59dc0d33-5ee1-46e0-b0c4-2b91075497bd" path="/var/lib/kubelet/pods/59dc0d33-5ee1-46e0-b0c4-2b91075497bd/volumes" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.926447 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:53 crc kubenswrapper[5037]: I1126 14:41:53.952245 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wnz9\" (UniqueName: \"kubernetes.io/projected/9d968815-6164-4fa7-83d2-035b696f148d-kube-api-access-5wnz9\") pod \"ceilometer-0\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " pod="openstack/ceilometer-0" Nov 26 14:41:54 crc kubenswrapper[5037]: I1126 14:41:54.036918 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:41:54 crc kubenswrapper[5037]: I1126 14:41:54.520985 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:41:54 crc kubenswrapper[5037]: W1126 14:41:54.525655 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d968815_6164_4fa7_83d2_035b696f148d.slice/crio-23baa7379d57634e437a14a3c42742aa4e73c393ea76d470a2adf5d0df43afcf WatchSource:0}: Error finding container 23baa7379d57634e437a14a3c42742aa4e73c393ea76d470a2adf5d0df43afcf: Status 404 returned error can't find the container with id 23baa7379d57634e437a14a3c42742aa4e73c393ea76d470a2adf5d0df43afcf Nov 26 14:41:54 crc kubenswrapper[5037]: I1126 14:41:54.657207 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d968815-6164-4fa7-83d2-035b696f148d","Type":"ContainerStarted","Data":"23baa7379d57634e437a14a3c42742aa4e73c393ea76d470a2adf5d0df43afcf"} Nov 26 14:41:55 crc kubenswrapper[5037]: I1126 14:41:55.668461 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d968815-6164-4fa7-83d2-035b696f148d","Type":"ContainerStarted","Data":"54ed1217ea8de6a4690b225bb980822d31de906395d654b95fad0d274a1b8b5c"} Nov 26 14:41:56 crc kubenswrapper[5037]: I1126 14:41:56.679760 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d968815-6164-4fa7-83d2-035b696f148d","Type":"ContainerStarted","Data":"a38dba55d4a4dd199020fb608a6996abd84a3b5ffc6d538b6bad80e90d502b76"} Nov 26 14:41:57 crc kubenswrapper[5037]: I1126 14:41:57.689590 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d968815-6164-4fa7-83d2-035b696f148d","Type":"ContainerStarted","Data":"0a78ad12e15653354527d757d669a9c5862aa88c2deb7e8fb98a1f6e4b597da2"} Nov 26 14:41:59 crc 
kubenswrapper[5037]: I1126 14:41:59.721620 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d968815-6164-4fa7-83d2-035b696f148d","Type":"ContainerStarted","Data":"0303fb9fff0d1803e3347db888d277ccd5448fb89cd1d5d855a030ae31164434"} Nov 26 14:41:59 crc kubenswrapper[5037]: I1126 14:41:59.722219 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 14:42:01 crc kubenswrapper[5037]: I1126 14:42:01.907813 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:42:01 crc kubenswrapper[5037]: E1126 14:42:01.908045 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:42:02 crc kubenswrapper[5037]: I1126 14:42:02.747240 5037 generic.go:334] "Generic (PLEG): container finished" podID="42522a9f-0861-47fb-9d66-65039590aeaf" containerID="4dc64289955c86f3b4c372cac08b43c9e8623cbbe4573778b4e74b03bcc33650" exitCode=0 Nov 26 14:42:02 crc kubenswrapper[5037]: I1126 14:42:02.747281 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5kqgs" event={"ID":"42522a9f-0861-47fb-9d66-65039590aeaf","Type":"ContainerDied","Data":"4dc64289955c86f3b4c372cac08b43c9e8623cbbe4573778b4e74b03bcc33650"} Nov 26 14:42:02 crc kubenswrapper[5037]: I1126 14:42:02.773974 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=5.612491198 podStartE2EDuration="9.773953872s" podCreationTimestamp="2025-11-26 14:41:53 +0000 UTC" firstStartedPulling="2025-11-26 14:41:54.548590507 +0000 UTC m=+1581.345360711" lastFinishedPulling="2025-11-26 14:41:58.710053191 +0000 UTC m=+1585.506823385" observedRunningTime="2025-11-26 14:41:59.755697362 +0000 UTC m=+1586.552467546" watchObservedRunningTime="2025-11-26 14:42:02.773953872 +0000 UTC m=+1589.570724056" Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.128776 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.202483 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-scripts\") pod \"42522a9f-0861-47fb-9d66-65039590aeaf\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.202638 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-combined-ca-bundle\") pod \"42522a9f-0861-47fb-9d66-65039590aeaf\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.202801 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-config-data\") pod \"42522a9f-0861-47fb-9d66-65039590aeaf\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.202830 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jcwcf\" (UniqueName: \"kubernetes.io/projected/42522a9f-0861-47fb-9d66-65039590aeaf-kube-api-access-jcwcf\") pod \"42522a9f-0861-47fb-9d66-65039590aeaf\" (UID: \"42522a9f-0861-47fb-9d66-65039590aeaf\") " Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.208165 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42522a9f-0861-47fb-9d66-65039590aeaf-kube-api-access-jcwcf" (OuterVolumeSpecName: "kube-api-access-jcwcf") pod "42522a9f-0861-47fb-9d66-65039590aeaf" (UID: "42522a9f-0861-47fb-9d66-65039590aeaf"). InnerVolumeSpecName "kube-api-access-jcwcf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.210516 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-scripts" (OuterVolumeSpecName: "scripts") pod "42522a9f-0861-47fb-9d66-65039590aeaf" (UID: "42522a9f-0861-47fb-9d66-65039590aeaf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.228071 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "42522a9f-0861-47fb-9d66-65039590aeaf" (UID: "42522a9f-0861-47fb-9d66-65039590aeaf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.228450 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-config-data" (OuterVolumeSpecName: "config-data") pod "42522a9f-0861-47fb-9d66-65039590aeaf" (UID: "42522a9f-0861-47fb-9d66-65039590aeaf"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.306533 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.306587 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jcwcf\" (UniqueName: \"kubernetes.io/projected/42522a9f-0861-47fb-9d66-65039590aeaf-kube-api-access-jcwcf\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.306611 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.306628 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42522a9f-0861-47fb-9d66-65039590aeaf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.771153 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5kqgs" event={"ID":"42522a9f-0861-47fb-9d66-65039590aeaf","Type":"ContainerDied","Data":"a755d85b4823ac69b1ba9de5066a0a9e80ad00c311b7ef12369cdcede79927e3"} Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.771224 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a755d85b4823ac69b1ba9de5066a0a9e80ad00c311b7ef12369cdcede79927e3" Nov 26 14:42:04 crc kubenswrapper[5037]: I1126 14:42:04.771235 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5kqgs" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.008231 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 14:42:05 crc kubenswrapper[5037]: E1126 14:42:05.008682 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42522a9f-0861-47fb-9d66-65039590aeaf" containerName="nova-cell0-conductor-db-sync" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.008701 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="42522a9f-0861-47fb-9d66-65039590aeaf" containerName="nova-cell0-conductor-db-sync" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.008900 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="42522a9f-0861-47fb-9d66-65039590aeaf" containerName="nova-cell0-conductor-db-sync" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.009582 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.015038 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-dvt6s" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.015419 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.023554 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.121768 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9f72n\" (UniqueName: \"kubernetes.io/projected/8707a232-f648-4795-b250-d29069f26514-kube-api-access-9f72n\") pod \"nova-cell0-conductor-0\" (UID: \"8707a232-f648-4795-b250-d29069f26514\") " pod="openstack/nova-cell0-conductor-0" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.121889 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8707a232-f648-4795-b250-d29069f26514-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8707a232-f648-4795-b250-d29069f26514\") " pod="openstack/nova-cell0-conductor-0" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.121932 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8707a232-f648-4795-b250-d29069f26514-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8707a232-f648-4795-b250-d29069f26514\") " pod="openstack/nova-cell0-conductor-0" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.223756 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8707a232-f648-4795-b250-d29069f26514-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8707a232-f648-4795-b250-d29069f26514\") " pod="openstack/nova-cell0-conductor-0" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.223824 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8707a232-f648-4795-b250-d29069f26514-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8707a232-f648-4795-b250-d29069f26514\") " pod="openstack/nova-cell0-conductor-0" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.223952 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9f72n\" (UniqueName: \"kubernetes.io/projected/8707a232-f648-4795-b250-d29069f26514-kube-api-access-9f72n\") pod \"nova-cell0-conductor-0\" (UID: \"8707a232-f648-4795-b250-d29069f26514\") " pod="openstack/nova-cell0-conductor-0" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.229371 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8707a232-f648-4795-b250-d29069f26514-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8707a232-f648-4795-b250-d29069f26514\") " pod="openstack/nova-cell0-conductor-0" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.230125 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8707a232-f648-4795-b250-d29069f26514-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"8707a232-f648-4795-b250-d29069f26514\") " pod="openstack/nova-cell0-conductor-0" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.240037 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9f72n\" (UniqueName: \"kubernetes.io/projected/8707a232-f648-4795-b250-d29069f26514-kube-api-access-9f72n\") pod \"nova-cell0-conductor-0\" (UID: \"8707a232-f648-4795-b250-d29069f26514\") " pod="openstack/nova-cell0-conductor-0" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.385730 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 14:42:05 crc kubenswrapper[5037]: I1126 14:42:05.832389 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 14:42:06 crc kubenswrapper[5037]: I1126 14:42:06.792403 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8707a232-f648-4795-b250-d29069f26514","Type":"ContainerStarted","Data":"b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2"} Nov 26 14:42:06 crc kubenswrapper[5037]: I1126 14:42:06.792785 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 26 14:42:06 crc kubenswrapper[5037]: I1126 14:42:06.792803 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8707a232-f648-4795-b250-d29069f26514","Type":"ContainerStarted","Data":"f52dfbc55babebb980f0fe8cc248430dce3d7cd8c75104d1368d16533b7fd51f"} Nov 26 14:42:12 crc kubenswrapper[5037]: I1126 14:42:12.908080 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:42:12 crc kubenswrapper[5037]: E1126 14:42:12.908796 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:42:15 crc kubenswrapper[5037]: I1126 14:42:15.427086 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 26 14:42:15 crc kubenswrapper[5037]: I1126 14:42:15.452682 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=11.452653922 podStartE2EDuration="11.452653922s" podCreationTimestamp="2025-11-26 14:42:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:42:06.81360304 +0000 UTC m=+1593.610373234" watchObservedRunningTime="2025-11-26 14:42:15.452653922 +0000 UTC m=+1602.249424146" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.079730 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-v8rzx"] Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.080868 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.083692 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.085077 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.098198 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-v8rzx"] Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.139871 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-config-data\") pod \"nova-cell0-cell-mapping-v8rzx\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.139952 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-v8rzx\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.140006 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-scripts\") pod \"nova-cell0-cell-mapping-v8rzx\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.140084 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz4n8\" (UniqueName: \"kubernetes.io/projected/a6069f8f-704d-4d3c-8007-0556c1e38b8d-kube-api-access-qz4n8\") pod \"nova-cell0-cell-mapping-v8rzx\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.220674 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.222166 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.225677 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.234195 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.241685 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-config-data\") pod \"nova-cell0-cell-mapping-v8rzx\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.241740 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-v8rzx\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.241780 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-scripts\") pod \"nova-cell0-cell-mapping-v8rzx\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.241836 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz4n8\" (UniqueName: \"kubernetes.io/projected/a6069f8f-704d-4d3c-8007-0556c1e38b8d-kube-api-access-qz4n8\") pod \"nova-cell0-cell-mapping-v8rzx\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.248962 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-config-data\") pod \"nova-cell0-cell-mapping-v8rzx\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.255962 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-v8rzx\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.257801 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-scripts\") pod \"nova-cell0-cell-mapping-v8rzx\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.275236 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz4n8\" (UniqueName: \"kubernetes.io/projected/a6069f8f-704d-4d3c-8007-0556c1e38b8d-kube-api-access-qz4n8\") pod \"nova-cell0-cell-mapping-v8rzx\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.344128 5037 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6accd992-74e0-4a92-9886-557b3870fe81-config-data\") pod \"nova-scheduler-0\" (UID: \"6accd992-74e0-4a92-9886-557b3870fe81\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.344186 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6accd992-74e0-4a92-9886-557b3870fe81-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6accd992-74e0-4a92-9886-557b3870fe81\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.344277 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lx7c9\" (UniqueName: \"kubernetes.io/projected/6accd992-74e0-4a92-9886-557b3870fe81-kube-api-access-lx7c9\") pod \"nova-scheduler-0\" (UID: \"6accd992-74e0-4a92-9886-557b3870fe81\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.371266 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.372814 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.375107 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.397615 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.414019 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.446609 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6accd992-74e0-4a92-9886-557b3870fe81-config-data\") pod \"nova-scheduler-0\" (UID: \"6accd992-74e0-4a92-9886-557b3870fe81\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.447778 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6accd992-74e0-4a92-9886-557b3870fe81-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6accd992-74e0-4a92-9886-557b3870fe81\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.447943 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66baebd5-d041-48b1-a668-3492f4a0e22e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.448099 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66baebd5-d041-48b1-a668-3492f4a0e22e-config-data\") pod \"nova-api-0\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.448312 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxjb2\" (UniqueName: \"kubernetes.io/projected/66baebd5-d041-48b1-a668-3492f4a0e22e-kube-api-access-wxjb2\") pod \"nova-api-0\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.448444 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lx7c9\" (UniqueName: \"kubernetes.io/projected/6accd992-74e0-4a92-9886-557b3870fe81-kube-api-access-lx7c9\") pod \"nova-scheduler-0\" (UID: \"6accd992-74e0-4a92-9886-557b3870fe81\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.449123 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66baebd5-d041-48b1-a668-3492f4a0e22e-logs\") pod \"nova-api-0\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.458431 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6accd992-74e0-4a92-9886-557b3870fe81-config-data\") pod \"nova-scheduler-0\" (UID: \"6accd992-74e0-4a92-9886-557b3870fe81\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.458601 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6accd992-74e0-4a92-9886-557b3870fe81-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6accd992-74e0-4a92-9886-557b3870fe81\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.487893 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx7c9\" (UniqueName: 
\"kubernetes.io/projected/6accd992-74e0-4a92-9886-557b3870fe81-kube-api-access-lx7c9\") pod \"nova-scheduler-0\" (UID: \"6accd992-74e0-4a92-9886-557b3870fe81\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.546126 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.548062 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.557210 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.557954 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66baebd5-d041-48b1-a668-3492f4a0e22e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.558012 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66baebd5-d041-48b1-a668-3492f4a0e22e-config-data\") pod \"nova-api-0\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.558066 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxjb2\" (UniqueName: \"kubernetes.io/projected/66baebd5-d041-48b1-a668-3492f4a0e22e-kube-api-access-wxjb2\") pod \"nova-api-0\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.558177 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66baebd5-d041-48b1-a668-3492f4a0e22e-logs\") pod \"nova-api-0\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.558730 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66baebd5-d041-48b1-a668-3492f4a0e22e-logs\") pod \"nova-api-0\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.559626 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.567028 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66baebd5-d041-48b1-a668-3492f4a0e22e-config-data\") pod \"nova-api-0\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.570116 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66baebd5-d041-48b1-a668-3492f4a0e22e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.570172 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.581328 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/nova-metadata-0"] Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.581359 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.582252 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxjb2\" (UniqueName: \"kubernetes.io/projected/66baebd5-d041-48b1-a668-3492f4a0e22e-kube-api-access-wxjb2\") pod \"nova-api-0\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.592756 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.654189 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.665770 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-xfcds"] Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.667573 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.670986 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87596e56-9580-4393-b653-3cf33e21cc30-config-data\") pod \"nova-metadata-0\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " pod="openstack/nova-metadata-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.671074 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87596e56-9580-4393-b653-3cf33e21cc30-logs\") pod \"nova-metadata-0\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " pod="openstack/nova-metadata-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.671307 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87596e56-9580-4393-b653-3cf33e21cc30-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " pod="openstack/nova-metadata-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.671331 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjg87\" (UniqueName: \"kubernetes.io/projected/87596e56-9580-4393-b653-3cf33e21cc30-kube-api-access-qjg87\") pod \"nova-metadata-0\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " pod="openstack/nova-metadata-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.671354 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b95f8258-ffae-4330-9554-fd42958945cc-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b95f8258-ffae-4330-9554-fd42958945cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.671379 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b95f8258-ffae-4330-9554-fd42958945cc-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b95f8258-ffae-4330-9554-fd42958945cc\") " 
pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.671432 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grjbg\" (UniqueName: \"kubernetes.io/projected/b95f8258-ffae-4330-9554-fd42958945cc-kube-api-access-grjbg\") pod \"nova-cell1-novncproxy-0\" (UID: \"b95f8258-ffae-4330-9554-fd42958945cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.694137 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-xfcds"] Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.702013 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.774628 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grjbg\" (UniqueName: \"kubernetes.io/projected/b95f8258-ffae-4330-9554-fd42958945cc-kube-api-access-grjbg\") pod \"nova-cell1-novncproxy-0\" (UID: \"b95f8258-ffae-4330-9554-fd42958945cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.774753 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-dns-svc\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.774778 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psdj5\" (UniqueName: \"kubernetes.io/projected/faa2f8fc-e417-40ea-bd09-280b79a99548-kube-api-access-psdj5\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.774805 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87596e56-9580-4393-b653-3cf33e21cc30-config-data\") pod \"nova-metadata-0\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " pod="openstack/nova-metadata-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.774824 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-dns-swift-storage-0\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.774878 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87596e56-9580-4393-b653-3cf33e21cc30-logs\") pod \"nova-metadata-0\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " pod="openstack/nova-metadata-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.774892 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-ovsdbserver-sb\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: 
I1126 14:42:16.774928 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-ovsdbserver-nb\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.775503 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-config\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.775535 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87596e56-9580-4393-b653-3cf33e21cc30-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " pod="openstack/nova-metadata-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.775558 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjg87\" (UniqueName: \"kubernetes.io/projected/87596e56-9580-4393-b653-3cf33e21cc30-kube-api-access-qjg87\") pod \"nova-metadata-0\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " pod="openstack/nova-metadata-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.775580 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b95f8258-ffae-4330-9554-fd42958945cc-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b95f8258-ffae-4330-9554-fd42958945cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.775618 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b95f8258-ffae-4330-9554-fd42958945cc-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b95f8258-ffae-4330-9554-fd42958945cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.775871 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87596e56-9580-4393-b653-3cf33e21cc30-logs\") pod \"nova-metadata-0\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " pod="openstack/nova-metadata-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.783370 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b95f8258-ffae-4330-9554-fd42958945cc-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b95f8258-ffae-4330-9554-fd42958945cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.783885 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87596e56-9580-4393-b653-3cf33e21cc30-config-data\") pod \"nova-metadata-0\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " pod="openstack/nova-metadata-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.784516 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b95f8258-ffae-4330-9554-fd42958945cc-combined-ca-bundle\") 
pod \"nova-cell1-novncproxy-0\" (UID: \"b95f8258-ffae-4330-9554-fd42958945cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.784985 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87596e56-9580-4393-b653-3cf33e21cc30-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " pod="openstack/nova-metadata-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.794278 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grjbg\" (UniqueName: \"kubernetes.io/projected/b95f8258-ffae-4330-9554-fd42958945cc-kube-api-access-grjbg\") pod \"nova-cell1-novncproxy-0\" (UID: \"b95f8258-ffae-4330-9554-fd42958945cc\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.797174 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjg87\" (UniqueName: \"kubernetes.io/projected/87596e56-9580-4393-b653-3cf33e21cc30-kube-api-access-qjg87\") pod \"nova-metadata-0\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " pod="openstack/nova-metadata-0" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.878125 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-dns-svc\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.878185 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psdj5\" (UniqueName: \"kubernetes.io/projected/faa2f8fc-e417-40ea-bd09-280b79a99548-kube-api-access-psdj5\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.878232 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-dns-swift-storage-0\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.878284 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-ovsdbserver-sb\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.878339 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-ovsdbserver-nb\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.878434 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-config\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " 
pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.880440 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-dns-svc\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.882112 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-dns-swift-storage-0\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.883231 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-ovsdbserver-nb\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.883265 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-config\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.883272 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-ovsdbserver-sb\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.902240 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psdj5\" (UniqueName: \"kubernetes.io/projected/faa2f8fc-e417-40ea-bd09-280b79a99548-kube-api-access-psdj5\") pod \"dnsmasq-dns-64dbf5859c-xfcds\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:16 crc kubenswrapper[5037]: I1126 14:42:16.984445 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.016943 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.028055 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.054544 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-v8rzx"] Nov 26 14:42:17 crc kubenswrapper[5037]: W1126 14:42:17.076139 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6069f8f_704d_4d3c_8007_0556c1e38b8d.slice/crio-a29056776ac1f70bd6a34833ca93aec40b362f09569e94d68be10d58b68f7043 WatchSource:0}: Error finding container a29056776ac1f70bd6a34833ca93aec40b362f09569e94d68be10d58b68f7043: Status 404 returned error can't find the container with id a29056776ac1f70bd6a34833ca93aec40b362f09569e94d68be10d58b68f7043 Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.146553 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:42:17 crc kubenswrapper[5037]: W1126 14:42:17.208085 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6accd992_74e0_4a92_9886_557b3870fe81.slice/crio-145a3a43774f79a83029e4111acc38715f64688bcfc3b1c0edef4eb00e60269e WatchSource:0}: Error finding container 145a3a43774f79a83029e4111acc38715f64688bcfc3b1c0edef4eb00e60269e: Status 404 returned error can't find the container with id 145a3a43774f79a83029e4111acc38715f64688bcfc3b1c0edef4eb00e60269e Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.211905 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.217013 5037 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.273109 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pthqz"] Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.274422 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.282939 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.283103 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.315001 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pthqz"] Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.392865 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-pthqz\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.392925 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-config-data\") pod \"nova-cell1-conductor-db-sync-pthqz\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.393059 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-scripts\") pod \"nova-cell1-conductor-db-sync-pthqz\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.393334 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6n76t\" (UniqueName: \"kubernetes.io/projected/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-kube-api-access-6n76t\") pod \"nova-cell1-conductor-db-sync-pthqz\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.455722 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.494996 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n76t\" (UniqueName: \"kubernetes.io/projected/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-kube-api-access-6n76t\") pod \"nova-cell1-conductor-db-sync-pthqz\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.495138 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-pthqz\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.495173 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-config-data\") pod \"nova-cell1-conductor-db-sync-pthqz\" (UID: 
\"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.495980 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-scripts\") pod \"nova-cell1-conductor-db-sync-pthqz\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.500679 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-scripts\") pod \"nova-cell1-conductor-db-sync-pthqz\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.500826 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-pthqz\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.503158 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-config-data\") pod \"nova-cell1-conductor-db-sync-pthqz\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.513798 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n76t\" (UniqueName: \"kubernetes.io/projected/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-kube-api-access-6n76t\") pod \"nova-cell1-conductor-db-sync-pthqz\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: W1126 14:42:17.574961 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod87596e56_9580_4393_b653_3cf33e21cc30.slice/crio-21263f4488ad7005b255cee1683743da90c7bd438db5b4ae7317a7efc298ca6e WatchSource:0}: Error finding container 21263f4488ad7005b255cee1683743da90c7bd438db5b4ae7317a7efc298ca6e: Status 404 returned error can't find the container with id 21263f4488ad7005b255cee1683743da90c7bd438db5b4ae7317a7efc298ca6e Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.577131 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.642006 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-xfcds"] Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.681821 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.970619 5037 generic.go:334] "Generic (PLEG): container finished" podID="faa2f8fc-e417-40ea-bd09-280b79a99548" containerID="acb7120d538d04bcd14b10d871a54e95a0fdf3529c741deb7e0d73851f6d275d" exitCode=0 Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.970915 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" event={"ID":"faa2f8fc-e417-40ea-bd09-280b79a99548","Type":"ContainerDied","Data":"acb7120d538d04bcd14b10d871a54e95a0fdf3529c741deb7e0d73851f6d275d"} Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.970944 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" event={"ID":"faa2f8fc-e417-40ea-bd09-280b79a99548","Type":"ContainerStarted","Data":"443ba2ac3c6645f0e65cb70f28f0ba9e4fc5e6808de9467617c1e70f5d13e141"} Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.976213 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66baebd5-d041-48b1-a668-3492f4a0e22e","Type":"ContainerStarted","Data":"3a7a1b24b0ea9b16d8068861c8a518396b283e724173c7ccb849f7dc56f3d71f"} Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.979373 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6accd992-74e0-4a92-9886-557b3870fe81","Type":"ContainerStarted","Data":"145a3a43774f79a83029e4111acc38715f64688bcfc3b1c0edef4eb00e60269e"} Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.981663 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b95f8258-ffae-4330-9554-fd42958945cc","Type":"ContainerStarted","Data":"348120f325cec7e3488596365317a76ea18ae6d57f6b9913cfe38b26d7f18fbd"} Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.983127 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"87596e56-9580-4393-b653-3cf33e21cc30","Type":"ContainerStarted","Data":"21263f4488ad7005b255cee1683743da90c7bd438db5b4ae7317a7efc298ca6e"} Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.998891 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-v8rzx" event={"ID":"a6069f8f-704d-4d3c-8007-0556c1e38b8d","Type":"ContainerStarted","Data":"636473aba00f3530f0263e12d2e4b1c92a75ffc63db3c24266a183bce9daaabe"} Nov 26 14:42:17 crc kubenswrapper[5037]: I1126 14:42:17.999158 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-v8rzx" event={"ID":"a6069f8f-704d-4d3c-8007-0556c1e38b8d","Type":"ContainerStarted","Data":"a29056776ac1f70bd6a34833ca93aec40b362f09569e94d68be10d58b68f7043"} Nov 26 14:42:18 crc kubenswrapper[5037]: I1126 14:42:18.023708 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-v8rzx" podStartSLOduration=2.023690367 podStartE2EDuration="2.023690367s" podCreationTimestamp="2025-11-26 14:42:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:42:18.016898611 +0000 UTC m=+1604.813668785" watchObservedRunningTime="2025-11-26 14:42:18.023690367 +0000 UTC m=+1604.820460541" Nov 26 14:42:18 crc kubenswrapper[5037]: I1126 14:42:18.136036 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pthqz"] Nov 26 
14:42:19 crc kubenswrapper[5037]: I1126 14:42:19.015853 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-pthqz" event={"ID":"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5","Type":"ContainerStarted","Data":"181d04aaba3a5e12256cd74fb1a24c1fbeb6893273bb7e2b686e760d59cbdfc5"} Nov 26 14:42:19 crc kubenswrapper[5037]: I1126 14:42:19.016159 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-pthqz" event={"ID":"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5","Type":"ContainerStarted","Data":"875b0c8dda8e5cc1487cf7d5db68a9a6ab3c0aeb9afa42b06d3e0eba23d55488"} Nov 26 14:42:19 crc kubenswrapper[5037]: I1126 14:42:19.020713 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" event={"ID":"faa2f8fc-e417-40ea-bd09-280b79a99548","Type":"ContainerStarted","Data":"7543287d626875e96b7fd10badbb09ba29d86483c98c340761eb4eeae930d754"} Nov 26 14:42:19 crc kubenswrapper[5037]: I1126 14:42:19.021306 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:19 crc kubenswrapper[5037]: I1126 14:42:19.044847 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-pthqz" podStartSLOduration=2.04482769 podStartE2EDuration="2.04482769s" podCreationTimestamp="2025-11-26 14:42:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:42:19.035005431 +0000 UTC m=+1605.831775605" watchObservedRunningTime="2025-11-26 14:42:19.04482769 +0000 UTC m=+1605.841597874" Nov 26 14:42:19 crc kubenswrapper[5037]: I1126 14:42:19.055130 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" podStartSLOduration=3.055111901 podStartE2EDuration="3.055111901s" podCreationTimestamp="2025-11-26 14:42:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:42:19.05385452 +0000 UTC m=+1605.850624714" watchObservedRunningTime="2025-11-26 14:42:19.055111901 +0000 UTC m=+1605.851882085" Nov 26 14:42:20 crc kubenswrapper[5037]: I1126 14:42:20.035494 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 14:42:20 crc kubenswrapper[5037]: I1126 14:42:20.044223 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:42:22 crc kubenswrapper[5037]: I1126 14:42:22.051437 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"87596e56-9580-4393-b653-3cf33e21cc30","Type":"ContainerStarted","Data":"d4490cdc702eb284fb560c1fb61da2d26b6df8afaad2620c0681ff6b028fa449"} Nov 26 14:42:22 crc kubenswrapper[5037]: I1126 14:42:22.052186 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"87596e56-9580-4393-b653-3cf33e21cc30","Type":"ContainerStarted","Data":"f7a0d1f48aae5151f234d8d12216b9455594b21fa7a2b6f0992c396c0d1110d5"} Nov 26 14:42:22 crc kubenswrapper[5037]: I1126 14:42:22.051600 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="87596e56-9580-4393-b653-3cf33e21cc30" containerName="nova-metadata-metadata" containerID="cri-o://d4490cdc702eb284fb560c1fb61da2d26b6df8afaad2620c0681ff6b028fa449" gracePeriod=30 Nov 26 
14:42:22 crc kubenswrapper[5037]: I1126 14:42:22.051537 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="87596e56-9580-4393-b653-3cf33e21cc30" containerName="nova-metadata-log" containerID="cri-o://f7a0d1f48aae5151f234d8d12216b9455594b21fa7a2b6f0992c396c0d1110d5" gracePeriod=30 Nov 26 14:42:22 crc kubenswrapper[5037]: I1126 14:42:22.057681 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66baebd5-d041-48b1-a668-3492f4a0e22e","Type":"ContainerStarted","Data":"f70b194c2b10101be9564aab23553689c01fa86d35a1fe41ac655ee1beadf267"} Nov 26 14:42:22 crc kubenswrapper[5037]: I1126 14:42:22.058002 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66baebd5-d041-48b1-a668-3492f4a0e22e","Type":"ContainerStarted","Data":"8d6a012ccb6b42dcde0ba2d8c506bc30f418692c2a201991371c8169e7a5602d"} Nov 26 14:42:22 crc kubenswrapper[5037]: I1126 14:42:22.060797 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6accd992-74e0-4a92-9886-557b3870fe81","Type":"ContainerStarted","Data":"94f7804ae1bbef2c592db90c0d711750bdc3bc1e226f50e8dfdbf47efb3baae1"} Nov 26 14:42:22 crc kubenswrapper[5037]: I1126 14:42:22.063751 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b95f8258-ffae-4330-9554-fd42958945cc","Type":"ContainerStarted","Data":"b07cbfa946d18fe5aa6c12bbbf5f9def24ad7c4fb03f3764fc72994f16512cc6"} Nov 26 14:42:22 crc kubenswrapper[5037]: I1126 14:42:22.063897 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="b95f8258-ffae-4330-9554-fd42958945cc" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://b07cbfa946d18fe5aa6c12bbbf5f9def24ad7c4fb03f3764fc72994f16512cc6" gracePeriod=30 Nov 26 14:42:22 crc kubenswrapper[5037]: I1126 14:42:22.093761 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.440659824 podStartE2EDuration="6.093732969s" podCreationTimestamp="2025-11-26 14:42:16 +0000 UTC" firstStartedPulling="2025-11-26 14:42:17.577078305 +0000 UTC m=+1604.373848489" lastFinishedPulling="2025-11-26 14:42:21.23015145 +0000 UTC m=+1608.026921634" observedRunningTime="2025-11-26 14:42:22.075218586 +0000 UTC m=+1608.871988780" watchObservedRunningTime="2025-11-26 14:42:22.093732969 +0000 UTC m=+1608.890503183" Nov 26 14:42:22 crc kubenswrapper[5037]: I1126 14:42:22.107953 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.341976785 podStartE2EDuration="6.107935575s" podCreationTimestamp="2025-11-26 14:42:16 +0000 UTC" firstStartedPulling="2025-11-26 14:42:17.458557302 +0000 UTC m=+1604.255327486" lastFinishedPulling="2025-11-26 14:42:21.224516072 +0000 UTC m=+1608.021286276" observedRunningTime="2025-11-26 14:42:22.105112006 +0000 UTC m=+1608.901882210" watchObservedRunningTime="2025-11-26 14:42:22.107935575 +0000 UTC m=+1608.904705769" Nov 26 14:42:22 crc kubenswrapper[5037]: I1126 14:42:22.119436 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.112428532 podStartE2EDuration="6.119417525s" podCreationTimestamp="2025-11-26 14:42:16 +0000 UTC" firstStartedPulling="2025-11-26 14:42:17.216818392 +0000 UTC m=+1604.013588576" 
lastFinishedPulling="2025-11-26 14:42:21.223807385 +0000 UTC m=+1608.020577569" observedRunningTime="2025-11-26 14:42:22.119351484 +0000 UTC m=+1608.916121678" watchObservedRunningTime="2025-11-26 14:42:22.119417525 +0000 UTC m=+1608.916187729" Nov 26 14:42:23 crc kubenswrapper[5037]: I1126 14:42:23.077853 5037 generic.go:334] "Generic (PLEG): container finished" podID="87596e56-9580-4393-b653-3cf33e21cc30" containerID="f7a0d1f48aae5151f234d8d12216b9455594b21fa7a2b6f0992c396c0d1110d5" exitCode=143 Nov 26 14:42:23 crc kubenswrapper[5037]: I1126 14:42:23.078052 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"87596e56-9580-4393-b653-3cf33e21cc30","Type":"ContainerDied","Data":"f7a0d1f48aae5151f234d8d12216b9455594b21fa7a2b6f0992c396c0d1110d5"} Nov 26 14:42:24 crc kubenswrapper[5037]: I1126 14:42:24.041272 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 14:42:24 crc kubenswrapper[5037]: I1126 14:42:24.071745 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=4.087240783 podStartE2EDuration="8.071669706s" podCreationTimestamp="2025-11-26 14:42:16 +0000 UTC" firstStartedPulling="2025-11-26 14:42:17.240147331 +0000 UTC m=+1604.036917515" lastFinishedPulling="2025-11-26 14:42:21.224576254 +0000 UTC m=+1608.021346438" observedRunningTime="2025-11-26 14:42:22.151765365 +0000 UTC m=+1608.948535569" watchObservedRunningTime="2025-11-26 14:42:24.071669706 +0000 UTC m=+1610.868439890" Nov 26 14:42:26 crc kubenswrapper[5037]: I1126 14:42:26.121461 5037 generic.go:334] "Generic (PLEG): container finished" podID="a6069f8f-704d-4d3c-8007-0556c1e38b8d" containerID="636473aba00f3530f0263e12d2e4b1c92a75ffc63db3c24266a183bce9daaabe" exitCode=0 Nov 26 14:42:26 crc kubenswrapper[5037]: I1126 14:42:26.121543 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-v8rzx" event={"ID":"a6069f8f-704d-4d3c-8007-0556c1e38b8d","Type":"ContainerDied","Data":"636473aba00f3530f0263e12d2e4b1c92a75ffc63db3c24266a183bce9daaabe"} Nov 26 14:42:26 crc kubenswrapper[5037]: I1126 14:42:26.655512 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 14:42:26 crc kubenswrapper[5037]: I1126 14:42:26.655832 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 14:42:26 crc kubenswrapper[5037]: I1126 14:42:26.691038 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 14:42:26 crc kubenswrapper[5037]: I1126 14:42:26.703241 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 14:42:26 crc kubenswrapper[5037]: I1126 14:42:26.703279 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 14:42:26 crc kubenswrapper[5037]: I1126 14:42:26.985613 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.018440 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.018494 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.030413 5037 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.092615 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-xzpq9"] Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.092871 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" podUID="8678a3ca-f406-4732-8478-56f5ea2f6174" containerName="dnsmasq-dns" containerID="cri-o://1132189092b14ee6c21db91aac9b539c44a1f52e5371431b948a4a1b8b523cd1" gracePeriod=10 Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.177604 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.737724 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.738229 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.789468 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="66baebd5-d041-48b1-a668-3492f4a0e22e" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.181:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.789471 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="66baebd5-d041-48b1-a668-3492f4a0e22e" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.181:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.816748 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-ovsdbserver-sb\") pod \"8678a3ca-f406-4732-8478-56f5ea2f6174\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.816789 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-config-data\") pod \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.816848 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qz4n8\" (UniqueName: \"kubernetes.io/projected/a6069f8f-704d-4d3c-8007-0556c1e38b8d-kube-api-access-qz4n8\") pod \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.816873 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwlch\" (UniqueName: \"kubernetes.io/projected/8678a3ca-f406-4732-8478-56f5ea2f6174-kube-api-access-cwlch\") pod \"8678a3ca-f406-4732-8478-56f5ea2f6174\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.816956 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-ovsdbserver-nb\") pod \"8678a3ca-f406-4732-8478-56f5ea2f6174\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.816979 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-combined-ca-bundle\") pod \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.817078 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-dns-svc\") pod \"8678a3ca-f406-4732-8478-56f5ea2f6174\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.817136 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-dns-swift-storage-0\") pod \"8678a3ca-f406-4732-8478-56f5ea2f6174\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.817152 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-scripts\") pod \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\" (UID: \"a6069f8f-704d-4d3c-8007-0556c1e38b8d\") " Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.817177 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-config\") pod \"8678a3ca-f406-4732-8478-56f5ea2f6174\" (UID: \"8678a3ca-f406-4732-8478-56f5ea2f6174\") " Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.833573 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-scripts" (OuterVolumeSpecName: "scripts") pod "a6069f8f-704d-4d3c-8007-0556c1e38b8d" (UID: "a6069f8f-704d-4d3c-8007-0556c1e38b8d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.842541 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8678a3ca-f406-4732-8478-56f5ea2f6174-kube-api-access-cwlch" (OuterVolumeSpecName: "kube-api-access-cwlch") pod "8678a3ca-f406-4732-8478-56f5ea2f6174" (UID: "8678a3ca-f406-4732-8478-56f5ea2f6174"). InnerVolumeSpecName "kube-api-access-cwlch". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.861686 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6069f8f-704d-4d3c-8007-0556c1e38b8d-kube-api-access-qz4n8" (OuterVolumeSpecName: "kube-api-access-qz4n8") pod "a6069f8f-704d-4d3c-8007-0556c1e38b8d" (UID: "a6069f8f-704d-4d3c-8007-0556c1e38b8d"). InnerVolumeSpecName "kube-api-access-qz4n8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.905546 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a6069f8f-704d-4d3c-8007-0556c1e38b8d" (UID: "a6069f8f-704d-4d3c-8007-0556c1e38b8d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.908824 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:42:27 crc kubenswrapper[5037]: E1126 14:42:27.909176 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.921517 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.921551 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qz4n8\" (UniqueName: \"kubernetes.io/projected/a6069f8f-704d-4d3c-8007-0556c1e38b8d-kube-api-access-qz4n8\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.921564 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwlch\" (UniqueName: \"kubernetes.io/projected/8678a3ca-f406-4732-8478-56f5ea2f6174-kube-api-access-cwlch\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.921577 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.954375 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8678a3ca-f406-4732-8478-56f5ea2f6174" (UID: "8678a3ca-f406-4732-8478-56f5ea2f6174"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.954541 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-config-data" (OuterVolumeSpecName: "config-data") pod "a6069f8f-704d-4d3c-8007-0556c1e38b8d" (UID: "a6069f8f-704d-4d3c-8007-0556c1e38b8d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.964119 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8678a3ca-f406-4732-8478-56f5ea2f6174" (UID: "8678a3ca-f406-4732-8478-56f5ea2f6174"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.965745 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8678a3ca-f406-4732-8478-56f5ea2f6174" (UID: "8678a3ca-f406-4732-8478-56f5ea2f6174"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.976917 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8678a3ca-f406-4732-8478-56f5ea2f6174" (UID: "8678a3ca-f406-4732-8478-56f5ea2f6174"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:42:27 crc kubenswrapper[5037]: I1126 14:42:27.980565 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-config" (OuterVolumeSpecName: "config") pod "8678a3ca-f406-4732-8478-56f5ea2f6174" (UID: "8678a3ca-f406-4732-8478-56f5ea2f6174"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.023231 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.023279 5037 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.023313 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.023326 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.023339 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6069f8f-704d-4d3c-8007-0556c1e38b8d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.023352 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8678a3ca-f406-4732-8478-56f5ea2f6174-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.141607 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-v8rzx" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.141626 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-v8rzx" event={"ID":"a6069f8f-704d-4d3c-8007-0556c1e38b8d","Type":"ContainerDied","Data":"a29056776ac1f70bd6a34833ca93aec40b362f09569e94d68be10d58b68f7043"} Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.142092 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a29056776ac1f70bd6a34833ca93aec40b362f09569e94d68be10d58b68f7043" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.143997 5037 generic.go:334] "Generic (PLEG): container finished" podID="8678a3ca-f406-4732-8478-56f5ea2f6174" containerID="1132189092b14ee6c21db91aac9b539c44a1f52e5371431b948a4a1b8b523cd1" exitCode=0 Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.144036 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" event={"ID":"8678a3ca-f406-4732-8478-56f5ea2f6174","Type":"ContainerDied","Data":"1132189092b14ee6c21db91aac9b539c44a1f52e5371431b948a4a1b8b523cd1"} Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.144058 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.144076 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-xzpq9" event={"ID":"8678a3ca-f406-4732-8478-56f5ea2f6174","Type":"ContainerDied","Data":"2e92351b867ee750c38ddac15e82a58d4ac748f02eb6dd93626c4eb49a639b6c"} Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.144094 5037 scope.go:117] "RemoveContainer" containerID="1132189092b14ee6c21db91aac9b539c44a1f52e5371431b948a4a1b8b523cd1" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.168645 5037 scope.go:117] "RemoveContainer" containerID="86e778099b0962ae8384f2c06e358ab70c59fdf998892ac5a48d9b9fc91dac8e" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.194660 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-xzpq9"] Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.203537 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-xzpq9"] Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.221465 5037 scope.go:117] "RemoveContainer" containerID="1132189092b14ee6c21db91aac9b539c44a1f52e5371431b948a4a1b8b523cd1" Nov 26 14:42:28 crc kubenswrapper[5037]: E1126 14:42:28.222202 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1132189092b14ee6c21db91aac9b539c44a1f52e5371431b948a4a1b8b523cd1\": container with ID starting with 1132189092b14ee6c21db91aac9b539c44a1f52e5371431b948a4a1b8b523cd1 not found: ID does not exist" containerID="1132189092b14ee6c21db91aac9b539c44a1f52e5371431b948a4a1b8b523cd1" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.222260 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1132189092b14ee6c21db91aac9b539c44a1f52e5371431b948a4a1b8b523cd1"} err="failed to get container status \"1132189092b14ee6c21db91aac9b539c44a1f52e5371431b948a4a1b8b523cd1\": rpc error: code = NotFound desc = could not find container \"1132189092b14ee6c21db91aac9b539c44a1f52e5371431b948a4a1b8b523cd1\": container with ID starting with 
1132189092b14ee6c21db91aac9b539c44a1f52e5371431b948a4a1b8b523cd1 not found: ID does not exist" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.222365 5037 scope.go:117] "RemoveContainer" containerID="86e778099b0962ae8384f2c06e358ab70c59fdf998892ac5a48d9b9fc91dac8e" Nov 26 14:42:28 crc kubenswrapper[5037]: E1126 14:42:28.223089 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86e778099b0962ae8384f2c06e358ab70c59fdf998892ac5a48d9b9fc91dac8e\": container with ID starting with 86e778099b0962ae8384f2c06e358ab70c59fdf998892ac5a48d9b9fc91dac8e not found: ID does not exist" containerID="86e778099b0962ae8384f2c06e358ab70c59fdf998892ac5a48d9b9fc91dac8e" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.223193 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86e778099b0962ae8384f2c06e358ab70c59fdf998892ac5a48d9b9fc91dac8e"} err="failed to get container status \"86e778099b0962ae8384f2c06e358ab70c59fdf998892ac5a48d9b9fc91dac8e\": rpc error: code = NotFound desc = could not find container \"86e778099b0962ae8384f2c06e358ab70c59fdf998892ac5a48d9b9fc91dac8e\": container with ID starting with 86e778099b0962ae8384f2c06e358ab70c59fdf998892ac5a48d9b9fc91dac8e not found: ID does not exist" Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.311033 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.311316 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="66baebd5-d041-48b1-a668-3492f4a0e22e" containerName="nova-api-log" containerID="cri-o://8d6a012ccb6b42dcde0ba2d8c506bc30f418692c2a201991371c8169e7a5602d" gracePeriod=30 Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.311386 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="66baebd5-d041-48b1-a668-3492f4a0e22e" containerName="nova-api-api" containerID="cri-o://f70b194c2b10101be9564aab23553689c01fa86d35a1fe41ac655ee1beadf267" gracePeriod=30 Nov 26 14:42:28 crc kubenswrapper[5037]: I1126 14:42:28.327084 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:42:29 crc kubenswrapper[5037]: I1126 14:42:29.160501 5037 generic.go:334] "Generic (PLEG): container finished" podID="013f650e-1eaf-4a38-b62d-5e9efbf8b6b5" containerID="181d04aaba3a5e12256cd74fb1a24c1fbeb6893273bb7e2b686e760d59cbdfc5" exitCode=0 Nov 26 14:42:29 crc kubenswrapper[5037]: I1126 14:42:29.160574 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-pthqz" event={"ID":"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5","Type":"ContainerDied","Data":"181d04aaba3a5e12256cd74fb1a24c1fbeb6893273bb7e2b686e760d59cbdfc5"} Nov 26 14:42:29 crc kubenswrapper[5037]: I1126 14:42:29.163933 5037 generic.go:334] "Generic (PLEG): container finished" podID="66baebd5-d041-48b1-a668-3492f4a0e22e" containerID="8d6a012ccb6b42dcde0ba2d8c506bc30f418692c2a201991371c8169e7a5602d" exitCode=143 Nov 26 14:42:29 crc kubenswrapper[5037]: I1126 14:42:29.164022 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66baebd5-d041-48b1-a668-3492f4a0e22e","Type":"ContainerDied","Data":"8d6a012ccb6b42dcde0ba2d8c506bc30f418692c2a201991371c8169e7a5602d"} Nov 26 14:42:29 crc kubenswrapper[5037]: I1126 14:42:29.300215 5037 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 14:42:29 crc kubenswrapper[5037]: I1126 14:42:29.300583 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="a7d2fc57-9486-4084-aabe-96ed92c69f2c" containerName="kube-state-metrics" containerID="cri-o://557776f46cf9b691730791d9711aeffac85522535f1f49783f904007f20687d6" gracePeriod=30 Nov 26 14:42:29 crc kubenswrapper[5037]: I1126 14:42:29.797352 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 14:42:29 crc kubenswrapper[5037]: I1126 14:42:29.856824 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k82xn\" (UniqueName: \"kubernetes.io/projected/a7d2fc57-9486-4084-aabe-96ed92c69f2c-kube-api-access-k82xn\") pod \"a7d2fc57-9486-4084-aabe-96ed92c69f2c\" (UID: \"a7d2fc57-9486-4084-aabe-96ed92c69f2c\") " Nov 26 14:42:29 crc kubenswrapper[5037]: I1126 14:42:29.872496 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7d2fc57-9486-4084-aabe-96ed92c69f2c-kube-api-access-k82xn" (OuterVolumeSpecName: "kube-api-access-k82xn") pod "a7d2fc57-9486-4084-aabe-96ed92c69f2c" (UID: "a7d2fc57-9486-4084-aabe-96ed92c69f2c"). InnerVolumeSpecName "kube-api-access-k82xn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:42:29 crc kubenswrapper[5037]: I1126 14:42:29.918866 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8678a3ca-f406-4732-8478-56f5ea2f6174" path="/var/lib/kubelet/pods/8678a3ca-f406-4732-8478-56f5ea2f6174/volumes" Nov 26 14:42:29 crc kubenswrapper[5037]: I1126 14:42:29.959501 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k82xn\" (UniqueName: \"kubernetes.io/projected/a7d2fc57-9486-4084-aabe-96ed92c69f2c-kube-api-access-k82xn\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.174920 5037 generic.go:334] "Generic (PLEG): container finished" podID="a7d2fc57-9486-4084-aabe-96ed92c69f2c" containerID="557776f46cf9b691730791d9711aeffac85522535f1f49783f904007f20687d6" exitCode=2 Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.175002 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.174996 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a7d2fc57-9486-4084-aabe-96ed92c69f2c","Type":"ContainerDied","Data":"557776f46cf9b691730791d9711aeffac85522535f1f49783f904007f20687d6"} Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.175080 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a7d2fc57-9486-4084-aabe-96ed92c69f2c","Type":"ContainerDied","Data":"09d7249a95dfdc6e5f7fda78fe3750c374fd28951ca8202d4326cb17513cf3e9"} Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.175115 5037 scope.go:117] "RemoveContainer" containerID="557776f46cf9b691730791d9711aeffac85522535f1f49783f904007f20687d6" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.175848 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="6accd992-74e0-4a92-9886-557b3870fe81" containerName="nova-scheduler-scheduler" containerID="cri-o://94f7804ae1bbef2c592db90c0d711750bdc3bc1e226f50e8dfdbf47efb3baae1" gracePeriod=30 Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.204232 5037 scope.go:117] "RemoveContainer" containerID="557776f46cf9b691730791d9711aeffac85522535f1f49783f904007f20687d6" Nov 26 14:42:30 crc kubenswrapper[5037]: E1126 14:42:30.207645 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"557776f46cf9b691730791d9711aeffac85522535f1f49783f904007f20687d6\": container with ID starting with 557776f46cf9b691730791d9711aeffac85522535f1f49783f904007f20687d6 not found: ID does not exist" containerID="557776f46cf9b691730791d9711aeffac85522535f1f49783f904007f20687d6" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.207720 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"557776f46cf9b691730791d9711aeffac85522535f1f49783f904007f20687d6"} err="failed to get container status \"557776f46cf9b691730791d9711aeffac85522535f1f49783f904007f20687d6\": rpc error: code = NotFound desc = could not find container \"557776f46cf9b691730791d9711aeffac85522535f1f49783f904007f20687d6\": container with ID starting with 557776f46cf9b691730791d9711aeffac85522535f1f49783f904007f20687d6 not found: ID does not exist" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.214344 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.231330 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.241647 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 14:42:30 crc kubenswrapper[5037]: E1126 14:42:30.242133 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d2fc57-9486-4084-aabe-96ed92c69f2c" containerName="kube-state-metrics" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.242158 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7d2fc57-9486-4084-aabe-96ed92c69f2c" containerName="kube-state-metrics" Nov 26 14:42:30 crc kubenswrapper[5037]: E1126 14:42:30.242199 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8678a3ca-f406-4732-8478-56f5ea2f6174" containerName="init" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 
14:42:30.242208 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8678a3ca-f406-4732-8478-56f5ea2f6174" containerName="init" Nov 26 14:42:30 crc kubenswrapper[5037]: E1126 14:42:30.242229 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8678a3ca-f406-4732-8478-56f5ea2f6174" containerName="dnsmasq-dns" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.242235 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8678a3ca-f406-4732-8478-56f5ea2f6174" containerName="dnsmasq-dns" Nov 26 14:42:30 crc kubenswrapper[5037]: E1126 14:42:30.242245 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6069f8f-704d-4d3c-8007-0556c1e38b8d" containerName="nova-manage" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.242250 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6069f8f-704d-4d3c-8007-0556c1e38b8d" containerName="nova-manage" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.242452 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6069f8f-704d-4d3c-8007-0556c1e38b8d" containerName="nova-manage" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.242468 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7d2fc57-9486-4084-aabe-96ed92c69f2c" containerName="kube-state-metrics" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.242485 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="8678a3ca-f406-4732-8478-56f5ea2f6174" containerName="dnsmasq-dns" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.243146 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.245190 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.245887 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.250243 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.368308 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") " pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.368406 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") " pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.368658 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6j74\" (UniqueName: \"kubernetes.io/projected/fd171888-b656-4511-af7d-cdff1058bf5f-kube-api-access-q6j74\") pod \"kube-state-metrics-0\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") " pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.368890 5037 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") " pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.470908 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6j74\" (UniqueName: \"kubernetes.io/projected/fd171888-b656-4511-af7d-cdff1058bf5f-kube-api-access-q6j74\") pod \"kube-state-metrics-0\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") " pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.471016 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") " pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.471061 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") " pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.471153 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") " pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.477720 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") " pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.477794 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") " pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.477973 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") " pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.487885 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6j74\" (UniqueName: \"kubernetes.io/projected/fd171888-b656-4511-af7d-cdff1058bf5f-kube-api-access-q6j74\") pod \"kube-state-metrics-0\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") " pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.556998 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.558402 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.674935 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-scripts\") pod \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.675051 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-combined-ca-bundle\") pod \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.675135 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-config-data\") pod \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.675221 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6n76t\" (UniqueName: \"kubernetes.io/projected/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-kube-api-access-6n76t\") pod \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\" (UID: \"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5\") " Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.680102 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-kube-api-access-6n76t" (OuterVolumeSpecName: "kube-api-access-6n76t") pod "013f650e-1eaf-4a38-b62d-5e9efbf8b6b5" (UID: "013f650e-1eaf-4a38-b62d-5e9efbf8b6b5"). InnerVolumeSpecName "kube-api-access-6n76t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.698197 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-scripts" (OuterVolumeSpecName: "scripts") pod "013f650e-1eaf-4a38-b62d-5e9efbf8b6b5" (UID: "013f650e-1eaf-4a38-b62d-5e9efbf8b6b5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.709829 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-config-data" (OuterVolumeSpecName: "config-data") pod "013f650e-1eaf-4a38-b62d-5e9efbf8b6b5" (UID: "013f650e-1eaf-4a38-b62d-5e9efbf8b6b5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.725404 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "013f650e-1eaf-4a38-b62d-5e9efbf8b6b5" (UID: "013f650e-1eaf-4a38-b62d-5e9efbf8b6b5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.777878 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.777912 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.777924 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:30 crc kubenswrapper[5037]: I1126 14:42:30.777933 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6n76t\" (UniqueName: \"kubernetes.io/projected/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5-kube-api-access-6n76t\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:31 crc kubenswrapper[5037]: W1126 14:42:31.039719 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd171888_b656_4511_af7d_cdff1058bf5f.slice/crio-d4e2dc7ebb7a8511c34726eae35d408c89b1328abbddcd1cf083a81227b4e83d WatchSource:0}: Error finding container d4e2dc7ebb7a8511c34726eae35d408c89b1328abbddcd1cf083a81227b4e83d: Status 404 returned error can't find the container with id d4e2dc7ebb7a8511c34726eae35d408c89b1328abbddcd1cf083a81227b4e83d Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.040204 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.185982 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-pthqz" event={"ID":"013f650e-1eaf-4a38-b62d-5e9efbf8b6b5","Type":"ContainerDied","Data":"875b0c8dda8e5cc1487cf7d5db68a9a6ab3c0aeb9afa42b06d3e0eba23d55488"} Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.186022 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-pthqz" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.186037 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="875b0c8dda8e5cc1487cf7d5db68a9a6ab3c0aeb9afa42b06d3e0eba23d55488" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.187895 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"fd171888-b656-4511-af7d-cdff1058bf5f","Type":"ContainerStarted","Data":"d4e2dc7ebb7a8511c34726eae35d408c89b1328abbddcd1cf083a81227b4e83d"} Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.258852 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 14:42:31 crc kubenswrapper[5037]: E1126 14:42:31.259417 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="013f650e-1eaf-4a38-b62d-5e9efbf8b6b5" containerName="nova-cell1-conductor-db-sync" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.259442 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="013f650e-1eaf-4a38-b62d-5e9efbf8b6b5" containerName="nova-cell1-conductor-db-sync" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.259700 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="013f650e-1eaf-4a38-b62d-5e9efbf8b6b5" containerName="nova-cell1-conductor-db-sync" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.260537 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.262546 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.268624 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.388042 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\") " pod="openstack/nova-cell1-conductor-0" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.388389 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrbtf\" (UniqueName: \"kubernetes.io/projected/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-kube-api-access-hrbtf\") pod \"nova-cell1-conductor-0\" (UID: \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\") " pod="openstack/nova-cell1-conductor-0" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.388706 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\") " pod="openstack/nova-cell1-conductor-0" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.470364 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.470840 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="ceilometer-central-agent" 
containerID="cri-o://54ed1217ea8de6a4690b225bb980822d31de906395d654b95fad0d274a1b8b5c" gracePeriod=30 Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.471166 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="proxy-httpd" containerID="cri-o://0303fb9fff0d1803e3347db888d277ccd5448fb89cd1d5d855a030ae31164434" gracePeriod=30 Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.471217 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="sg-core" containerID="cri-o://0a78ad12e15653354527d757d669a9c5862aa88c2deb7e8fb98a1f6e4b597da2" gracePeriod=30 Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.471168 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="ceilometer-notification-agent" containerID="cri-o://a38dba55d4a4dd199020fb608a6996abd84a3b5ffc6d538b6bad80e90d502b76" gracePeriod=30 Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.490204 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrbtf\" (UniqueName: \"kubernetes.io/projected/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-kube-api-access-hrbtf\") pod \"nova-cell1-conductor-0\" (UID: \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\") " pod="openstack/nova-cell1-conductor-0" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.490356 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\") " pod="openstack/nova-cell1-conductor-0" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.490429 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\") " pod="openstack/nova-cell1-conductor-0" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.494007 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\") " pod="openstack/nova-cell1-conductor-0" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.495460 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\") " pod="openstack/nova-cell1-conductor-0" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.505766 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrbtf\" (UniqueName: \"kubernetes.io/projected/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-kube-api-access-hrbtf\") pod \"nova-cell1-conductor-0\" (UID: \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\") " pod="openstack/nova-cell1-conductor-0" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.579323 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 14:42:31 crc kubenswrapper[5037]: E1126 14:42:31.657613 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="94f7804ae1bbef2c592db90c0d711750bdc3bc1e226f50e8dfdbf47efb3baae1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 14:42:31 crc kubenswrapper[5037]: E1126 14:42:31.661130 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="94f7804ae1bbef2c592db90c0d711750bdc3bc1e226f50e8dfdbf47efb3baae1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 14:42:31 crc kubenswrapper[5037]: E1126 14:42:31.662667 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="94f7804ae1bbef2c592db90c0d711750bdc3bc1e226f50e8dfdbf47efb3baae1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 14:42:31 crc kubenswrapper[5037]: E1126 14:42:31.662732 5037 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="6accd992-74e0-4a92-9886-557b3870fe81" containerName="nova-scheduler-scheduler" Nov 26 14:42:31 crc kubenswrapper[5037]: I1126 14:42:31.919605 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7d2fc57-9486-4084-aabe-96ed92c69f2c" path="/var/lib/kubelet/pods/a7d2fc57-9486-4084-aabe-96ed92c69f2c/volumes" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.051959 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.202796 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"dd47ce65-1426-47e2-a5d1-6efd83bac3ab","Type":"ContainerStarted","Data":"987c501de9a8908355bbdb779a488b1aae86858303bf80cd01d671d920bb722b"} Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.204038 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"fd171888-b656-4511-af7d-cdff1058bf5f","Type":"ContainerStarted","Data":"9f6f522c179ad5b2c8f6d172e255b1812ec80e1f19a7f152457199761310283e"} Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.204312 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.208887 5037 generic.go:334] "Generic (PLEG): container finished" podID="9d968815-6164-4fa7-83d2-035b696f148d" containerID="0303fb9fff0d1803e3347db888d277ccd5448fb89cd1d5d855a030ae31164434" exitCode=0 Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.208983 5037 generic.go:334] "Generic (PLEG): container finished" podID="9d968815-6164-4fa7-83d2-035b696f148d" containerID="0a78ad12e15653354527d757d669a9c5862aa88c2deb7e8fb98a1f6e4b597da2" exitCode=2 Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.209049 5037 generic.go:334] "Generic (PLEG): container finished" podID="9d968815-6164-4fa7-83d2-035b696f148d" 
containerID="a38dba55d4a4dd199020fb608a6996abd84a3b5ffc6d538b6bad80e90d502b76" exitCode=0 Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.209104 5037 generic.go:334] "Generic (PLEG): container finished" podID="9d968815-6164-4fa7-83d2-035b696f148d" containerID="54ed1217ea8de6a4690b225bb980822d31de906395d654b95fad0d274a1b8b5c" exitCode=0 Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.209277 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d968815-6164-4fa7-83d2-035b696f148d","Type":"ContainerDied","Data":"0303fb9fff0d1803e3347db888d277ccd5448fb89cd1d5d855a030ae31164434"} Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.218661 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d968815-6164-4fa7-83d2-035b696f148d","Type":"ContainerDied","Data":"0a78ad12e15653354527d757d669a9c5862aa88c2deb7e8fb98a1f6e4b597da2"} Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.218683 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d968815-6164-4fa7-83d2-035b696f148d","Type":"ContainerDied","Data":"a38dba55d4a4dd199020fb608a6996abd84a3b5ffc6d538b6bad80e90d502b76"} Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.218694 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d968815-6164-4fa7-83d2-035b696f148d","Type":"ContainerDied","Data":"54ed1217ea8de6a4690b225bb980822d31de906395d654b95fad0d274a1b8b5c"} Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.230812 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.878986028 podStartE2EDuration="2.230784664s" podCreationTimestamp="2025-11-26 14:42:30 +0000 UTC" firstStartedPulling="2025-11-26 14:42:31.04290255 +0000 UTC m=+1617.839672754" lastFinishedPulling="2025-11-26 14:42:31.394701206 +0000 UTC m=+1618.191471390" observedRunningTime="2025-11-26 14:42:32.227659597 +0000 UTC m=+1619.024429781" watchObservedRunningTime="2025-11-26 14:42:32.230784664 +0000 UTC m=+1619.027554878" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.464654 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.527967 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d968815-6164-4fa7-83d2-035b696f148d-log-httpd\") pod \"9d968815-6164-4fa7-83d2-035b696f148d\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.528004 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wnz9\" (UniqueName: \"kubernetes.io/projected/9d968815-6164-4fa7-83d2-035b696f148d-kube-api-access-5wnz9\") pod \"9d968815-6164-4fa7-83d2-035b696f148d\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.528039 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-sg-core-conf-yaml\") pod \"9d968815-6164-4fa7-83d2-035b696f148d\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.528077 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-scripts\") pod \"9d968815-6164-4fa7-83d2-035b696f148d\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.528156 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-combined-ca-bundle\") pod \"9d968815-6164-4fa7-83d2-035b696f148d\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.528212 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d968815-6164-4fa7-83d2-035b696f148d-run-httpd\") pod \"9d968815-6164-4fa7-83d2-035b696f148d\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.528280 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-config-data\") pod \"9d968815-6164-4fa7-83d2-035b696f148d\" (UID: \"9d968815-6164-4fa7-83d2-035b696f148d\") " Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.529850 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d968815-6164-4fa7-83d2-035b696f148d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9d968815-6164-4fa7-83d2-035b696f148d" (UID: "9d968815-6164-4fa7-83d2-035b696f148d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.533189 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d968815-6164-4fa7-83d2-035b696f148d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9d968815-6164-4fa7-83d2-035b696f148d" (UID: "9d968815-6164-4fa7-83d2-035b696f148d"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.556433 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-scripts" (OuterVolumeSpecName: "scripts") pod "9d968815-6164-4fa7-83d2-035b696f148d" (UID: "9d968815-6164-4fa7-83d2-035b696f148d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.593612 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d968815-6164-4fa7-83d2-035b696f148d-kube-api-access-5wnz9" (OuterVolumeSpecName: "kube-api-access-5wnz9") pod "9d968815-6164-4fa7-83d2-035b696f148d" (UID: "9d968815-6164-4fa7-83d2-035b696f148d"). InnerVolumeSpecName "kube-api-access-5wnz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.631440 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9d968815-6164-4fa7-83d2-035b696f148d" (UID: "9d968815-6164-4fa7-83d2-035b696f148d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.632613 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.632631 5037 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d968815-6164-4fa7-83d2-035b696f148d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.632640 5037 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9d968815-6164-4fa7-83d2-035b696f148d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.632649 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wnz9\" (UniqueName: \"kubernetes.io/projected/9d968815-6164-4fa7-83d2-035b696f148d-kube-api-access-5wnz9\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.632660 5037 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.705609 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d968815-6164-4fa7-83d2-035b696f148d" (UID: "9d968815-6164-4fa7-83d2-035b696f148d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.720628 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-config-data" (OuterVolumeSpecName: "config-data") pod "9d968815-6164-4fa7-83d2-035b696f148d" (UID: "9d968815-6164-4fa7-83d2-035b696f148d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.733935 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:32 crc kubenswrapper[5037]: I1126 14:42:32.733966 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d968815-6164-4fa7-83d2-035b696f148d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.227939 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9d968815-6164-4fa7-83d2-035b696f148d","Type":"ContainerDied","Data":"23baa7379d57634e437a14a3c42742aa4e73c393ea76d470a2adf5d0df43afcf"} Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.228205 5037 scope.go:117] "RemoveContainer" containerID="0303fb9fff0d1803e3347db888d277ccd5448fb89cd1d5d855a030ae31164434" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.227974 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.229308 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"dd47ce65-1426-47e2-a5d1-6efd83bac3ab","Type":"ContainerStarted","Data":"cafd25254996ab2af2a3389cdf0cdcd2a0d515e80e24d59e43ba4b1e34bf696b"} Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.229438 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.261275 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.261253696 podStartE2EDuration="2.261253696s" podCreationTimestamp="2025-11-26 14:42:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:42:33.254360018 +0000 UTC m=+1620.051130202" watchObservedRunningTime="2025-11-26 14:42:33.261253696 +0000 UTC m=+1620.058023880" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.275266 5037 scope.go:117] "RemoveContainer" containerID="0a78ad12e15653354527d757d669a9c5862aa88c2deb7e8fb98a1f6e4b597da2" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.296143 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.314654 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.327167 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:42:33 crc kubenswrapper[5037]: E1126 14:42:33.327676 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="ceilometer-notification-agent" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.327694 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="ceilometer-notification-agent" Nov 26 14:42:33 crc kubenswrapper[5037]: E1126 14:42:33.327716 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="ceilometer-central-agent" Nov 26 14:42:33 
crc kubenswrapper[5037]: I1126 14:42:33.327722 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="ceilometer-central-agent" Nov 26 14:42:33 crc kubenswrapper[5037]: E1126 14:42:33.327733 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="proxy-httpd" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.327739 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="proxy-httpd" Nov 26 14:42:33 crc kubenswrapper[5037]: E1126 14:42:33.327752 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="sg-core" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.327758 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="sg-core" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.327932 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="ceilometer-notification-agent" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.327954 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="sg-core" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.327967 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="proxy-httpd" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.327980 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d968815-6164-4fa7-83d2-035b696f148d" containerName="ceilometer-central-agent" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.329636 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.332961 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.335674 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.339492 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.341590 5037 scope.go:117] "RemoveContainer" containerID="a38dba55d4a4dd199020fb608a6996abd84a3b5ffc6d538b6bad80e90d502b76" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.342118 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.370388 5037 scope.go:117] "RemoveContainer" containerID="54ed1217ea8de6a4690b225bb980822d31de906395d654b95fad0d274a1b8b5c" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.449649 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.449988 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-config-data\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.450067 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctsjg\" (UniqueName: \"kubernetes.io/projected/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-kube-api-access-ctsjg\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.450099 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-log-httpd\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.450135 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.450181 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-run-httpd\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.450253 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-scripts\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.450352 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.552482 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-run-httpd\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.552567 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-scripts\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.552632 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.552676 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.552722 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-config-data\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.552774 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctsjg\" (UniqueName: \"kubernetes.io/projected/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-kube-api-access-ctsjg\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.552806 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-log-httpd\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.552842 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.553991 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-log-httpd\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.555179 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-run-httpd\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.559368 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.559567 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-config-data\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.560772 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.561193 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.564840 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-scripts\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.570447 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctsjg\" (UniqueName: \"kubernetes.io/projected/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-kube-api-access-ctsjg\") pod \"ceilometer-0\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.650132 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:42:33 crc kubenswrapper[5037]: I1126 14:42:33.919543 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d968815-6164-4fa7-83d2-035b696f148d" path="/var/lib/kubelet/pods/9d968815-6164-4fa7-83d2-035b696f148d/volumes" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.152252 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.164662 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.248230 5037 generic.go:334] "Generic (PLEG): container finished" podID="66baebd5-d041-48b1-a668-3492f4a0e22e" containerID="f70b194c2b10101be9564aab23553689c01fa86d35a1fe41ac655ee1beadf267" exitCode=0 Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.248298 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66baebd5-d041-48b1-a668-3492f4a0e22e","Type":"ContainerDied","Data":"f70b194c2b10101be9564aab23553689c01fa86d35a1fe41ac655ee1beadf267"} Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.248324 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66baebd5-d041-48b1-a668-3492f4a0e22e","Type":"ContainerDied","Data":"3a7a1b24b0ea9b16d8068861c8a518396b283e724173c7ccb849f7dc56f3d71f"} Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.248341 5037 scope.go:117] "RemoveContainer" containerID="f70b194c2b10101be9564aab23553689c01fa86d35a1fe41ac655ee1beadf267" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.248423 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.254218 5037 generic.go:334] "Generic (PLEG): container finished" podID="6accd992-74e0-4a92-9886-557b3870fe81" containerID="94f7804ae1bbef2c592db90c0d711750bdc3bc1e226f50e8dfdbf47efb3baae1" exitCode=0 Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.254275 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6accd992-74e0-4a92-9886-557b3870fe81","Type":"ContainerDied","Data":"94f7804ae1bbef2c592db90c0d711750bdc3bc1e226f50e8dfdbf47efb3baae1"} Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.263274 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556","Type":"ContainerStarted","Data":"2f3f7e34bbab6e1ae1e6ec76586e4a34e52130932b59701e0056978c4d0ae4b4"} Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.265703 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66baebd5-d041-48b1-a668-3492f4a0e22e-config-data\") pod \"66baebd5-d041-48b1-a668-3492f4a0e22e\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.265956 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66baebd5-d041-48b1-a668-3492f4a0e22e-logs\") pod \"66baebd5-d041-48b1-a668-3492f4a0e22e\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.266016 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66baebd5-d041-48b1-a668-3492f4a0e22e-combined-ca-bundle\") pod \"66baebd5-d041-48b1-a668-3492f4a0e22e\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.266044 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxjb2\" (UniqueName: \"kubernetes.io/projected/66baebd5-d041-48b1-a668-3492f4a0e22e-kube-api-access-wxjb2\") pod \"66baebd5-d041-48b1-a668-3492f4a0e22e\" (UID: \"66baebd5-d041-48b1-a668-3492f4a0e22e\") " Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 
14:42:34.268399 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66baebd5-d041-48b1-a668-3492f4a0e22e-logs" (OuterVolumeSpecName: "logs") pod "66baebd5-d041-48b1-a668-3492f4a0e22e" (UID: "66baebd5-d041-48b1-a668-3492f4a0e22e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.271869 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66baebd5-d041-48b1-a668-3492f4a0e22e-kube-api-access-wxjb2" (OuterVolumeSpecName: "kube-api-access-wxjb2") pod "66baebd5-d041-48b1-a668-3492f4a0e22e" (UID: "66baebd5-d041-48b1-a668-3492f4a0e22e"). InnerVolumeSpecName "kube-api-access-wxjb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.284260 5037 scope.go:117] "RemoveContainer" containerID="8d6a012ccb6b42dcde0ba2d8c506bc30f418692c2a201991371c8169e7a5602d" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.301623 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66baebd5-d041-48b1-a668-3492f4a0e22e-config-data" (OuterVolumeSpecName: "config-data") pod "66baebd5-d041-48b1-a668-3492f4a0e22e" (UID: "66baebd5-d041-48b1-a668-3492f4a0e22e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.312328 5037 scope.go:117] "RemoveContainer" containerID="f70b194c2b10101be9564aab23553689c01fa86d35a1fe41ac655ee1beadf267" Nov 26 14:42:34 crc kubenswrapper[5037]: E1126 14:42:34.312720 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f70b194c2b10101be9564aab23553689c01fa86d35a1fe41ac655ee1beadf267\": container with ID starting with f70b194c2b10101be9564aab23553689c01fa86d35a1fe41ac655ee1beadf267 not found: ID does not exist" containerID="f70b194c2b10101be9564aab23553689c01fa86d35a1fe41ac655ee1beadf267" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.312757 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f70b194c2b10101be9564aab23553689c01fa86d35a1fe41ac655ee1beadf267"} err="failed to get container status \"f70b194c2b10101be9564aab23553689c01fa86d35a1fe41ac655ee1beadf267\": rpc error: code = NotFound desc = could not find container \"f70b194c2b10101be9564aab23553689c01fa86d35a1fe41ac655ee1beadf267\": container with ID starting with f70b194c2b10101be9564aab23553689c01fa86d35a1fe41ac655ee1beadf267 not found: ID does not exist" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.312779 5037 scope.go:117] "RemoveContainer" containerID="8d6a012ccb6b42dcde0ba2d8c506bc30f418692c2a201991371c8169e7a5602d" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.312950 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66baebd5-d041-48b1-a668-3492f4a0e22e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "66baebd5-d041-48b1-a668-3492f4a0e22e" (UID: "66baebd5-d041-48b1-a668-3492f4a0e22e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:34 crc kubenswrapper[5037]: E1126 14:42:34.313168 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d6a012ccb6b42dcde0ba2d8c506bc30f418692c2a201991371c8169e7a5602d\": container with ID starting with 8d6a012ccb6b42dcde0ba2d8c506bc30f418692c2a201991371c8169e7a5602d not found: ID does not exist" containerID="8d6a012ccb6b42dcde0ba2d8c506bc30f418692c2a201991371c8169e7a5602d" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.313195 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d6a012ccb6b42dcde0ba2d8c506bc30f418692c2a201991371c8169e7a5602d"} err="failed to get container status \"8d6a012ccb6b42dcde0ba2d8c506bc30f418692c2a201991371c8169e7a5602d\": rpc error: code = NotFound desc = could not find container \"8d6a012ccb6b42dcde0ba2d8c506bc30f418692c2a201991371c8169e7a5602d\": container with ID starting with 8d6a012ccb6b42dcde0ba2d8c506bc30f418692c2a201991371c8169e7a5602d not found: ID does not exist" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.377838 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66baebd5-d041-48b1-a668-3492f4a0e22e-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.377881 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66baebd5-d041-48b1-a668-3492f4a0e22e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.377894 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxjb2\" (UniqueName: \"kubernetes.io/projected/66baebd5-d041-48b1-a668-3492f4a0e22e-kube-api-access-wxjb2\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.377905 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66baebd5-d041-48b1-a668-3492f4a0e22e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.447867 5037 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.581845 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lx7c9\" (UniqueName: \"kubernetes.io/projected/6accd992-74e0-4a92-9886-557b3870fe81-kube-api-access-lx7c9\") pod \"6accd992-74e0-4a92-9886-557b3870fe81\" (UID: \"6accd992-74e0-4a92-9886-557b3870fe81\") "
Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.581970 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6accd992-74e0-4a92-9886-557b3870fe81-config-data\") pod \"6accd992-74e0-4a92-9886-557b3870fe81\" (UID: \"6accd992-74e0-4a92-9886-557b3870fe81\") "
Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.582182 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6accd992-74e0-4a92-9886-557b3870fe81-combined-ca-bundle\") pod \"6accd992-74e0-4a92-9886-557b3870fe81\" (UID: \"6accd992-74e0-4a92-9886-557b3870fe81\") "
Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.607148 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6accd992-74e0-4a92-9886-557b3870fe81-kube-api-access-lx7c9" (OuterVolumeSpecName: "kube-api-access-lx7c9") pod "6accd992-74e0-4a92-9886-557b3870fe81" (UID: "6accd992-74e0-4a92-9886-557b3870fe81"). InnerVolumeSpecName "kube-api-access-lx7c9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.621938 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.629548 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6accd992-74e0-4a92-9886-557b3870fe81-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6accd992-74e0-4a92-9886-557b3870fe81" (UID: "6accd992-74e0-4a92-9886-557b3870fe81"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.638693 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6accd992-74e0-4a92-9886-557b3870fe81-config-data" (OuterVolumeSpecName: "config-data") pod "6accd992-74e0-4a92-9886-557b3870fe81" (UID: "6accd992-74e0-4a92-9886-557b3870fe81"). InnerVolumeSpecName "config-data".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.643953 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.662461 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 14:42:34 crc kubenswrapper[5037]: E1126 14:42:34.666202 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66baebd5-d041-48b1-a668-3492f4a0e22e" containerName="nova-api-log" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.666231 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="66baebd5-d041-48b1-a668-3492f4a0e22e" containerName="nova-api-log" Nov 26 14:42:34 crc kubenswrapper[5037]: E1126 14:42:34.666313 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66baebd5-d041-48b1-a668-3492f4a0e22e" containerName="nova-api-api" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.666323 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="66baebd5-d041-48b1-a668-3492f4a0e22e" containerName="nova-api-api" Nov 26 14:42:34 crc kubenswrapper[5037]: E1126 14:42:34.666354 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6accd992-74e0-4a92-9886-557b3870fe81" containerName="nova-scheduler-scheduler" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.666516 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="6accd992-74e0-4a92-9886-557b3870fe81" containerName="nova-scheduler-scheduler" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.669462 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="66baebd5-d041-48b1-a668-3492f4a0e22e" containerName="nova-api-api" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.669499 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="66baebd5-d041-48b1-a668-3492f4a0e22e" containerName="nova-api-log" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.669533 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="6accd992-74e0-4a92-9886-557b3870fe81" containerName="nova-scheduler-scheduler" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.673029 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.678640 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.680950 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.685048 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6accd992-74e0-4a92-9886-557b3870fe81-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.685074 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lx7c9\" (UniqueName: \"kubernetes.io/projected/6accd992-74e0-4a92-9886-557b3870fe81-kube-api-access-lx7c9\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.685090 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6accd992-74e0-4a92-9886-557b3870fe81-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.791904 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-logs\") pod \"nova-api-0\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.792019 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl22v\" (UniqueName: \"kubernetes.io/projected/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-kube-api-access-pl22v\") pod \"nova-api-0\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.792108 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.792239 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-config-data\") pod \"nova-api-0\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.894242 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl22v\" (UniqueName: \"kubernetes.io/projected/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-kube-api-access-pl22v\") pod \"nova-api-0\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.894804 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.894882 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-config-data\") pod \"nova-api-0\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.894953 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-logs\") pod \"nova-api-0\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.895393 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-logs\") pod \"nova-api-0\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.899213 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.900615 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-config-data\") pod \"nova-api-0\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " pod="openstack/nova-api-0" Nov 26 14:42:34 crc kubenswrapper[5037]: I1126 14:42:34.909977 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pl22v\" (UniqueName: \"kubernetes.io/projected/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-kube-api-access-pl22v\") pod \"nova-api-0\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " pod="openstack/nova-api-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.005419 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.274024 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.274032 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6accd992-74e0-4a92-9886-557b3870fe81","Type":"ContainerDied","Data":"145a3a43774f79a83029e4111acc38715f64688bcfc3b1c0edef4eb00e60269e"} Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.274343 5037 scope.go:117] "RemoveContainer" containerID="94f7804ae1bbef2c592db90c0d711750bdc3bc1e226f50e8dfdbf47efb3baae1" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.276241 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556","Type":"ContainerStarted","Data":"9b060f5248816d25c9577fffedca050a87900c7a204b7a345af98597b8fe0684"} Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.314593 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.324510 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.339265 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.340724 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.345060 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.355988 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.407446 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.407647 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmw2j\" (UniqueName: \"kubernetes.io/projected/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-kube-api-access-gmw2j\") pod \"nova-scheduler-0\" (UID: \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.407859 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-config-data\") pod \"nova-scheduler-0\" (UID: \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.463840 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.509510 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmw2j\" (UniqueName: \"kubernetes.io/projected/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-kube-api-access-gmw2j\") pod \"nova-scheduler-0\" (UID: \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.509602 5037 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-config-data\") pod \"nova-scheduler-0\" (UID: \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.509672 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.516113 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.521825 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-config-data\") pod \"nova-scheduler-0\" (UID: \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.529899 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmw2j\" (UniqueName: \"kubernetes.io/projected/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-kube-api-access-gmw2j\") pod \"nova-scheduler-0\" (UID: \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\") " pod="openstack/nova-scheduler-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.669938 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.919545 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66baebd5-d041-48b1-a668-3492f4a0e22e" path="/var/lib/kubelet/pods/66baebd5-d041-48b1-a668-3492f4a0e22e/volumes" Nov 26 14:42:35 crc kubenswrapper[5037]: I1126 14:42:35.920080 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6accd992-74e0-4a92-9886-557b3870fe81" path="/var/lib/kubelet/pods/6accd992-74e0-4a92-9886-557b3870fe81/volumes" Nov 26 14:42:36 crc kubenswrapper[5037]: I1126 14:42:36.306595 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556","Type":"ContainerStarted","Data":"4dfef52a4228c6ba6aa50763b516a103d58e8049f0dafa5ac0317240ca27827e"} Nov 26 14:42:36 crc kubenswrapper[5037]: I1126 14:42:36.316785 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5","Type":"ContainerStarted","Data":"8a0101817e876e40f7c47968b34dd4a562c0b54f491d0c59c2b95fbc3fb2d558"} Nov 26 14:42:36 crc kubenswrapper[5037]: I1126 14:42:36.316842 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5","Type":"ContainerStarted","Data":"be108884803bb557e8adf398c23206c72cc3c512672dba77b21bb6a31e2cce4b"} Nov 26 14:42:36 crc kubenswrapper[5037]: I1126 14:42:36.557091 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:42:37 crc kubenswrapper[5037]: I1126 14:42:37.327172 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd","Type":"ContainerStarted","Data":"3a19d95d8c512c752245f45bd86e82d06ece7dca89b7728bb08c93a55df41ed3"} Nov 26 14:42:37 crc kubenswrapper[5037]: I1126 14:42:37.327577 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd","Type":"ContainerStarted","Data":"9af1effdaaf9758f1bab39e17b3aa1a54845356e71139d5d2a54b787ab0e6a6d"} Nov 26 14:42:37 crc kubenswrapper[5037]: I1126 14:42:37.330118 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556","Type":"ContainerStarted","Data":"c8951bc9d395a7d7c6bc798a3590c0f41acfaec0d2abeb3f57d578b0817ef5bf"} Nov 26 14:42:37 crc kubenswrapper[5037]: I1126 14:42:37.332138 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5","Type":"ContainerStarted","Data":"27d186b91dbc239c18433f71264b7bead5b151ee8f447e7f0c53934638e1af3b"} Nov 26 14:42:37 crc kubenswrapper[5037]: I1126 14:42:37.351745 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.351732036 podStartE2EDuration="2.351732036s" podCreationTimestamp="2025-11-26 14:42:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:42:37.347589175 +0000 UTC m=+1624.144359359" watchObservedRunningTime="2025-11-26 14:42:37.351732036 +0000 UTC m=+1624.148502220" Nov 26 14:42:37 crc kubenswrapper[5037]: I1126 14:42:37.373714 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" 
podStartSLOduration=3.373695513 podStartE2EDuration="3.373695513s" podCreationTimestamp="2025-11-26 14:42:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:42:37.366301762 +0000 UTC m=+1624.163071966" watchObservedRunningTime="2025-11-26 14:42:37.373695513 +0000 UTC m=+1624.170465687"
Nov 26 14:42:38 crc kubenswrapper[5037]: I1126 14:42:38.349162 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556","Type":"ContainerStarted","Data":"f148d4c9904f2422f82cba5daa769dab150bbc012402b51083b120ea10222f71"}
Nov 26 14:42:38 crc kubenswrapper[5037]: I1126 14:42:38.349627 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 26 14:42:38 crc kubenswrapper[5037]: I1126 14:42:38.908182 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181"
Nov 26 14:42:38 crc kubenswrapper[5037]: E1126 14:42:38.908952 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:42:40 crc kubenswrapper[5037]: I1126 14:42:40.579099 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 26 14:42:40 crc kubenswrapper[5037]: I1126 14:42:40.618661 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.7337696620000003 podStartE2EDuration="7.618637535s" podCreationTimestamp="2025-11-26 14:42:33 +0000 UTC" firstStartedPulling="2025-11-26 14:42:34.167100615 +0000 UTC m=+1620.963870799" lastFinishedPulling="2025-11-26 14:42:38.051968468 +0000 UTC m=+1624.848738672" observedRunningTime="2025-11-26 14:42:38.377054732 +0000 UTC m=+1625.173824926" watchObservedRunningTime="2025-11-26 14:42:40.618637535 +0000 UTC m=+1627.415407729"
Nov 26 14:42:40 crc kubenswrapper[5037]: I1126 14:42:40.671118 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 26 14:42:41 crc kubenswrapper[5037]: I1126 14:42:41.609825 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 26 14:42:45 crc kubenswrapper[5037]: I1126 14:42:45.006957 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 26 14:42:45 crc kubenswrapper[5037]: I1126 14:42:45.007572 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 26 14:42:45 crc kubenswrapper[5037]: I1126 14:42:45.671057 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 26 14:42:45 crc kubenswrapper[5037]: I1126 14:42:45.703917 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 26 14:42:46 crc kubenswrapper[5037]: I1126 14:42:46.089624 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.189:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
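The probe output above is Go's net/http client timeout text: kubelet's HTTP prober gave the freshly restarted nova-api-api container until the probe deadline to return response headers, and it did not answer in time. A sketch that reproduces the same failure mode; the 1-second timeout is an assumption for illustration, not the pod's real probe setting:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// When the server does not answer before the client deadline, Go reports
// exactly the wording seen in the record above:
// context deadline exceeded (Client.Timeout exceeded while awaiting headers)
func main() {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get("http://10.217.0.189:8774/")
	if err != nil {
		fmt.Println("probe failure:", err)
		return
	}
	resp.Body.Close()
	fmt.Println("probe status:", resp.StatusCode)
}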
Nov 26 14:42:46 crc kubenswrapper[5037]: I1126 14:42:46.089691 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.189:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 26 14:42:46 crc kubenswrapper[5037]: I1126 14:42:46.464835 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 26 14:42:50 crc kubenswrapper[5037]: I1126 14:42:50.910552 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181"
Nov 26 14:42:50 crc kubenswrapper[5037]: E1126 14:42:50.911734 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:42:52 crc kubenswrapper[5037]: E1126 14:42:52.108169 5037 manager.go:1116] Failed to create existing container: /kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6accd992_74e0_4a92_9886_557b3870fe81.slice/crio-145a3a43774f79a83029e4111acc38715f64688bcfc3b1c0edef4eb00e60269e: Error finding container 145a3a43774f79a83029e4111acc38715f64688bcfc3b1c0edef4eb00e60269e: Status 404 returned error can't find the container with id 145a3a43774f79a83029e4111acc38715f64688bcfc3b1c0edef4eb00e60269e
Nov 26 14:42:52 crc kubenswrapper[5037]: E1126 14:42:52.348748 5037 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod87596e56_9580_4393_b653_3cf33e21cc30.slice/crio-d4490cdc702eb284fb560c1fb61da2d26b6df8afaad2620c0681ff6b028fa449.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb95f8258_ffae_4330_9554_fd42958945cc.slice/crio-b07cbfa946d18fe5aa6c12bbbf5f9def24ad7c4fb03f3764fc72994f16512cc6.scope\": RecentStats: unable to find data in memory cache]"
Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.489233 5037 generic.go:334] "Generic (PLEG): container finished" podID="b95f8258-ffae-4330-9554-fd42958945cc" containerID="b07cbfa946d18fe5aa6c12bbbf5f9def24ad7c4fb03f3764fc72994f16512cc6" exitCode=137
Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.489341 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b95f8258-ffae-4330-9554-fd42958945cc","Type":"ContainerDied","Data":"b07cbfa946d18fe5aa6c12bbbf5f9def24ad7c4fb03f3764fc72994f16512cc6"}
Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.491452 5037 generic.go:334] "Generic (PLEG): container finished" podID="87596e56-9580-4393-b653-3cf33e21cc30" containerID="d4490cdc702eb284fb560c1fb61da2d26b6df8afaad2620c0681ff6b028fa449" exitCode=137
Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.491498 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"87596e56-9580-4393-b653-3cf33e21cc30","Type":"ContainerDied","Data":"d4490cdc702eb284fb560c1fb61da2d26b6df8afaad2620c0681ff6b028fa449"}
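Two things are worth decoding in this stretch. First, machine-config-daemon is in CrashLoopBackOff with the restart delay already at its ceiling ("back-off 5m0s"): Kubernetes documents an exponential restart back-off of 10s, 20s, 40s, and so on, capped at five minutes. Second, the nova-metadata-0 and nova-cell1-novncproxy-0 containers report exitCode=137, which is 128+9, i.e. SIGKILL. The Go sketch below illustrates both decodings; the bookkeeping is this note's illustration, not kubelet's implementation:

package main

import (
	"fmt"
	"time"
)

// restartDelay sketches the documented back-off schedule behind the
// "back-off 5m0s" message: doubling from 10s, capped at five minutes.
func restartDelay(failures int) time.Duration {
	delay, maxDelay := 10*time.Second, 5*time.Minute
	for i := 1; i < failures; i++ {
		delay *= 2
		if delay >= maxDelay {
			return maxDelay
		}
	}
	return delay
}

func main() {
	for n := 1; n <= 7; n++ {
		fmt.Printf("crash %d -> next restart in %v\n", n, restartDelay(n))
	}
	// exitCode=137 decodes as 128+9: the container was stopped with
	// SIGKILL, the usual signature of a forced stop after the grace period.
	fmt.Println("exit code 137 => terminated by signal", 137-128)
}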
event={"ID":"87596e56-9580-4393-b653-3cf33e21cc30","Type":"ContainerDied","Data":"d4490cdc702eb284fb560c1fb61da2d26b6df8afaad2620c0681ff6b028fa449"} Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.491525 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"87596e56-9580-4393-b653-3cf33e21cc30","Type":"ContainerDied","Data":"21263f4488ad7005b255cee1683743da90c7bd438db5b4ae7317a7efc298ca6e"} Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.491537 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21263f4488ad7005b255cee1683743da90c7bd438db5b4ae7317a7efc298ca6e" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.579498 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.588052 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.655799 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87596e56-9580-4393-b653-3cf33e21cc30-logs\") pod \"87596e56-9580-4393-b653-3cf33e21cc30\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.655924 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87596e56-9580-4393-b653-3cf33e21cc30-combined-ca-bundle\") pod \"87596e56-9580-4393-b653-3cf33e21cc30\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.655968 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grjbg\" (UniqueName: \"kubernetes.io/projected/b95f8258-ffae-4330-9554-fd42958945cc-kube-api-access-grjbg\") pod \"b95f8258-ffae-4330-9554-fd42958945cc\" (UID: \"b95f8258-ffae-4330-9554-fd42958945cc\") " Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.656089 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b95f8258-ffae-4330-9554-fd42958945cc-combined-ca-bundle\") pod \"b95f8258-ffae-4330-9554-fd42958945cc\" (UID: \"b95f8258-ffae-4330-9554-fd42958945cc\") " Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.656136 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87596e56-9580-4393-b653-3cf33e21cc30-config-data\") pod \"87596e56-9580-4393-b653-3cf33e21cc30\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.656238 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qjg87\" (UniqueName: \"kubernetes.io/projected/87596e56-9580-4393-b653-3cf33e21cc30-kube-api-access-qjg87\") pod \"87596e56-9580-4393-b653-3cf33e21cc30\" (UID: \"87596e56-9580-4393-b653-3cf33e21cc30\") " Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.656261 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87596e56-9580-4393-b653-3cf33e21cc30-logs" (OuterVolumeSpecName: "logs") pod "87596e56-9580-4393-b653-3cf33e21cc30" (UID: "87596e56-9580-4393-b653-3cf33e21cc30"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.656311 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b95f8258-ffae-4330-9554-fd42958945cc-config-data\") pod \"b95f8258-ffae-4330-9554-fd42958945cc\" (UID: \"b95f8258-ffae-4330-9554-fd42958945cc\") " Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.657409 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87596e56-9580-4393-b653-3cf33e21cc30-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.662142 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b95f8258-ffae-4330-9554-fd42958945cc-kube-api-access-grjbg" (OuterVolumeSpecName: "kube-api-access-grjbg") pod "b95f8258-ffae-4330-9554-fd42958945cc" (UID: "b95f8258-ffae-4330-9554-fd42958945cc"). InnerVolumeSpecName "kube-api-access-grjbg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.668258 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87596e56-9580-4393-b653-3cf33e21cc30-kube-api-access-qjg87" (OuterVolumeSpecName: "kube-api-access-qjg87") pod "87596e56-9580-4393-b653-3cf33e21cc30" (UID: "87596e56-9580-4393-b653-3cf33e21cc30"). InnerVolumeSpecName "kube-api-access-qjg87". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.692825 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87596e56-9580-4393-b653-3cf33e21cc30-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "87596e56-9580-4393-b653-3cf33e21cc30" (UID: "87596e56-9580-4393-b653-3cf33e21cc30"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.697822 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b95f8258-ffae-4330-9554-fd42958945cc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b95f8258-ffae-4330-9554-fd42958945cc" (UID: "b95f8258-ffae-4330-9554-fd42958945cc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.712051 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b95f8258-ffae-4330-9554-fd42958945cc-config-data" (OuterVolumeSpecName: "config-data") pod "b95f8258-ffae-4330-9554-fd42958945cc" (UID: "b95f8258-ffae-4330-9554-fd42958945cc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.714268 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87596e56-9580-4393-b653-3cf33e21cc30-config-data" (OuterVolumeSpecName: "config-data") pod "87596e56-9580-4393-b653-3cf33e21cc30" (UID: "87596e56-9580-4393-b653-3cf33e21cc30"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.759354 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87596e56-9580-4393-b653-3cf33e21cc30-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.759391 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grjbg\" (UniqueName: \"kubernetes.io/projected/b95f8258-ffae-4330-9554-fd42958945cc-kube-api-access-grjbg\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.759402 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b95f8258-ffae-4330-9554-fd42958945cc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.759410 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87596e56-9580-4393-b653-3cf33e21cc30-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.759419 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qjg87\" (UniqueName: \"kubernetes.io/projected/87596e56-9580-4393-b653-3cf33e21cc30-kube-api-access-qjg87\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:52 crc kubenswrapper[5037]: I1126 14:42:52.759428 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b95f8258-ffae-4330-9554-fd42958945cc-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.504647 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.504692 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b95f8258-ffae-4330-9554-fd42958945cc","Type":"ContainerDied","Data":"348120f325cec7e3488596365317a76ea18ae6d57f6b9913cfe38b26d7f18fbd"} Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.504759 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.504772 5037 scope.go:117] "RemoveContainer" containerID="b07cbfa946d18fe5aa6c12bbbf5f9def24ad7c4fb03f3764fc72994f16512cc6" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.563205 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.579774 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.595620 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.607462 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.637804 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:42:53 crc kubenswrapper[5037]: E1126 14:42:53.639041 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b95f8258-ffae-4330-9554-fd42958945cc" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.639066 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="b95f8258-ffae-4330-9554-fd42958945cc" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 14:42:53 crc kubenswrapper[5037]: E1126 14:42:53.639088 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87596e56-9580-4393-b653-3cf33e21cc30" containerName="nova-metadata-metadata" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.639094 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="87596e56-9580-4393-b653-3cf33e21cc30" containerName="nova-metadata-metadata" Nov 26 14:42:53 crc kubenswrapper[5037]: E1126 14:42:53.639104 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87596e56-9580-4393-b653-3cf33e21cc30" containerName="nova-metadata-log" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.639111 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="87596e56-9580-4393-b653-3cf33e21cc30" containerName="nova-metadata-log" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.639613 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="87596e56-9580-4393-b653-3cf33e21cc30" containerName="nova-metadata-log" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.639663 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="b95f8258-ffae-4330-9554-fd42958945cc" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.639678 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="87596e56-9580-4393-b653-3cf33e21cc30" containerName="nova-metadata-metadata" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.643629 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0"
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.645578 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.645790 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.648972 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.653440 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.654726 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.654843 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.655238 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.656237 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.669912 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.675997 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.676102 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.676124 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/818cd85a-3db7-4a42-ac8e-5bf11c024493-logs\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0"
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.676157 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0"
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.676191 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-config-data\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0"
Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.676216 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0"
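Before starting the replacement pods, kubelet populates informer caches for the TLS secrets they mount: an internal-service certificate for nova-metadata, public and VeNCrypt certificates for the cell1 noVNC proxy, and each pod's combined-ca-bundle. A service consuming such secret volumes typically loads a key pair and a CA pool at startup; a Go sketch under assumed mount paths (the paths below are illustrative only, not taken from these pods):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"os"
)

// Sketch: assembling a tls.Config from a mounted TLS secret plus a CA
// bundle, the shape of data behind volumes like "nova-metadata-tls-certs"
// and "combined-ca-bundle". Mount paths are assumptions for illustration.
func main() {
	cert, err := tls.LoadX509KeyPair(
		"/etc/pki/tls/certs/tls.crt", // assumed mount path
		"/etc/pki/tls/private/tls.key")
	if err != nil {
		log.Fatal(err)
	}
	caPEM, err := os.ReadFile("/etc/pki/ca-trust/combined-ca-bundle.pem") // assumed
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		log.Fatal("no CA certificates parsed from bundle")
	}
	cfg := &tls.Config{Certificates: []tls.Certificate{cert}, RootCAs: pool}
	_ = cfg // hand cfg to an http.Server or tls.Listen as needed
	log.Println("TLS config assembled")
}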
14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.676216 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.676238 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.676332 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7htm\" (UniqueName: \"kubernetes.io/projected/4408c030-a5ac-49ae-9361-54cbe3c27108-kube-api-access-t7htm\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.676393 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdb5c\" (UniqueName: \"kubernetes.io/projected/818cd85a-3db7-4a42-ac8e-5bf11c024493-kube-api-access-gdb5c\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.676697 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.778707 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.778779 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.778803 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/818cd85a-3db7-4a42-ac8e-5bf11c024493-logs\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.778827 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.778847 5037 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-config-data\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.778882 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.778917 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.778958 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7htm\" (UniqueName: \"kubernetes.io/projected/4408c030-a5ac-49ae-9361-54cbe3c27108-kube-api-access-t7htm\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.778983 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdb5c\" (UniqueName: \"kubernetes.io/projected/818cd85a-3db7-4a42-ac8e-5bf11c024493-kube-api-access-gdb5c\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.779032 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.779874 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/818cd85a-3db7-4a42-ac8e-5bf11c024493-logs\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.784253 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.784364 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.785157 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-config-data\") pod \"nova-metadata-0\" (UID: 
\"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.786825 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.791895 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.792520 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.797918 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.806786 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7htm\" (UniqueName: \"kubernetes.io/projected/4408c030-a5ac-49ae-9361-54cbe3c27108-kube-api-access-t7htm\") pod \"nova-cell1-novncproxy-0\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.812381 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdb5c\" (UniqueName: \"kubernetes.io/projected/818cd85a-3db7-4a42-ac8e-5bf11c024493-kube-api-access-gdb5c\") pod \"nova-metadata-0\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.918329 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87596e56-9580-4393-b653-3cf33e21cc30" path="/var/lib/kubelet/pods/87596e56-9580-4393-b653-3cf33e21cc30/volumes" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.919138 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b95f8258-ffae-4330-9554-fd42958945cc" path="/var/lib/kubelet/pods/b95f8258-ffae-4330-9554-fd42958945cc/volumes" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.966808 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 14:42:53 crc kubenswrapper[5037]: I1126 14:42:53.979908 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 14:42:54 crc kubenswrapper[5037]: W1126 14:42:54.433014 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod818cd85a_3db7_4a42_ac8e_5bf11c024493.slice/crio-cfb3dd29df00c0079de45a4f0ecd63155e8ed7fe67c57b9605c7f70080540920 WatchSource:0}: Error finding container cfb3dd29df00c0079de45a4f0ecd63155e8ed7fe67c57b9605c7f70080540920: Status 404 returned error can't find the container with id cfb3dd29df00c0079de45a4f0ecd63155e8ed7fe67c57b9605c7f70080540920
Nov 26 14:42:54 crc kubenswrapper[5037]: I1126 14:42:54.449898 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 26 14:42:54 crc kubenswrapper[5037]: I1126 14:42:54.498017 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 14:42:54 crc kubenswrapper[5037]: W1126 14:42:54.500927 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4408c030_a5ac_49ae_9361_54cbe3c27108.slice/crio-33d3a546ce44fa80f3ce312259e709179bc4d386efc53586e603557016ee9221 WatchSource:0}: Error finding container 33d3a546ce44fa80f3ce312259e709179bc4d386efc53586e603557016ee9221: Status 404 returned error can't find the container with id 33d3a546ce44fa80f3ce312259e709179bc4d386efc53586e603557016ee9221
Nov 26 14:42:54 crc kubenswrapper[5037]: I1126 14:42:54.517568 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"818cd85a-3db7-4a42-ac8e-5bf11c024493","Type":"ContainerStarted","Data":"cfb3dd29df00c0079de45a4f0ecd63155e8ed7fe67c57b9605c7f70080540920"}
Nov 26 14:42:54 crc kubenswrapper[5037]: I1126 14:42:54.518676 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"4408c030-a5ac-49ae-9361-54cbe3c27108","Type":"ContainerStarted","Data":"33d3a546ce44fa80f3ce312259e709179bc4d386efc53586e603557016ee9221"}
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.010427 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.010982 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.013378 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.013910 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.537116 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"818cd85a-3db7-4a42-ac8e-5bf11c024493","Type":"ContainerStarted","Data":"c6118d2fae4740b5164dbd3b99d85a64d2afc4b33c78c0221ff4c7e1f97aaf0f"}
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.537504 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"818cd85a-3db7-4a42-ac8e-5bf11c024493","Type":"ContainerStarted","Data":"3f18a20df312ccff720b79feb2c26822f029e7d1990ee69dcd19116d22023de4"}
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.540615 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"4408c030-a5ac-49ae-9361-54cbe3c27108","Type":"ContainerStarted","Data":"4f57619ebc65ee19c82e274478cdb8f19dd8e02a6b90642fbf2271294bdfb236"}
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.541032 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.545463 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.562765 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.5627435309999997 podStartE2EDuration="2.562743531s" podCreationTimestamp="2025-11-26 14:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:42:55.558989719 +0000 UTC m=+1642.355759943" watchObservedRunningTime="2025-11-26 14:42:55.562743531 +0000 UTC m=+1642.359513715"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.610467 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.610447985 podStartE2EDuration="2.610447985s" podCreationTimestamp="2025-11-26 14:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:42:55.598909883 +0000 UTC m=+1642.395680067" watchObservedRunningTime="2025-11-26 14:42:55.610447985 +0000 UTC m=+1642.407218169"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.732581 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-gz5lz"]
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.734226 5037 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.761222 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-gz5lz"]
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.828017 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-ovsdbserver-nb\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.828106 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-config\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.828136 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-dns-swift-storage-0\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.828169 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-ovsdbserver-sb\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.828199 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-dns-svc\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.828224 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsj2d\" (UniqueName: \"kubernetes.io/projected/257e4b94-6b37-4243-8e8a-6bd47f0a5603-kube-api-access-gsj2d\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.931579 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-ovsdbserver-nb\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.931696 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-config\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.931732 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-dns-swift-storage-0\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.931761 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-ovsdbserver-sb\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.931794 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-dns-svc\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.931815 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsj2d\" (UniqueName: \"kubernetes.io/projected/257e4b94-6b37-4243-8e8a-6bd47f0a5603-kube-api-access-gsj2d\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.933129 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-ovsdbserver-nb\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.933961 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-ovsdbserver-sb\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.934508 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-config\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.936867 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-dns-swift-storage-0\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.939073 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-dns-svc\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:55 crc kubenswrapper[5037]: I1126 14:42:55.970434 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsj2d\" (UniqueName: \"kubernetes.io/projected/257e4b94-6b37-4243-8e8a-6bd47f0a5603-kube-api-access-gsj2d\") pod \"dnsmasq-dns-55bfb77665-gz5lz\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:56 crc kubenswrapper[5037]: I1126 14:42:56.075734 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:56 crc kubenswrapper[5037]: I1126 14:42:56.568434 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-gz5lz"]
Nov 26 14:42:56 crc kubenswrapper[5037]: W1126 14:42:56.570652 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod257e4b94_6b37_4243_8e8a_6bd47f0a5603.slice/crio-74fe444d85526afbafbf495f9e22b89bbb20992f9499e071b44abb627b0e2476 WatchSource:0}: Error finding container 74fe444d85526afbafbf495f9e22b89bbb20992f9499e071b44abb627b0e2476: Status 404 returned error can't find the container with id 74fe444d85526afbafbf495f9e22b89bbb20992f9499e071b44abb627b0e2476
Nov 26 14:42:57 crc kubenswrapper[5037]: I1126 14:42:57.559197 5037 generic.go:334] "Generic (PLEG): container finished" podID="257e4b94-6b37-4243-8e8a-6bd47f0a5603" containerID="600eca6ef64f5ec22ada4dfc68aa6316b18796bc98f34fd7f889faa193bbcf49" exitCode=0
Nov 26 14:42:57 crc kubenswrapper[5037]: I1126 14:42:57.559382 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz" event={"ID":"257e4b94-6b37-4243-8e8a-6bd47f0a5603","Type":"ContainerDied","Data":"600eca6ef64f5ec22ada4dfc68aa6316b18796bc98f34fd7f889faa193bbcf49"}
Nov 26 14:42:57 crc kubenswrapper[5037]: I1126 14:42:57.559720 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz" event={"ID":"257e4b94-6b37-4243-8e8a-6bd47f0a5603","Type":"ContainerStarted","Data":"74fe444d85526afbafbf495f9e22b89bbb20992f9499e071b44abb627b0e2476"}
Nov 26 14:42:57 crc kubenswrapper[5037]: I1126 14:42:57.799932 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 14:42:57 crc kubenswrapper[5037]: I1126 14:42:57.800676 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="ceilometer-central-agent" containerID="cri-o://9b060f5248816d25c9577fffedca050a87900c7a204b7a345af98597b8fe0684" gracePeriod=30
Nov 26 14:42:57 crc kubenswrapper[5037]: I1126 14:42:57.800873 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="ceilometer-notification-agent" containerID="cri-o://4dfef52a4228c6ba6aa50763b516a103d58e8049f0dafa5ac0317240ca27827e" gracePeriod=30
Nov 26 14:42:57 crc kubenswrapper[5037]: I1126 14:42:57.800927 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="proxy-httpd" containerID="cri-o://f148d4c9904f2422f82cba5daa769dab150bbc012402b51083b120ea10222f71" gracePeriod=30
Nov 26 14:42:57 crc kubenswrapper[5037]: I1126 14:42:57.800978 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="sg-core" containerID="cri-o://c8951bc9d395a7d7c6bc798a3590c0f41acfaec0d2abeb3f57d578b0817ef5bf" gracePeriod=30
Nov 26 14:42:57 crc kubenswrapper[5037]: I1126 14:42:57.911208 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.188:3000/\": read tcp 10.217.0.2:33320->10.217.0.188:3000: read: connection reset by peer"
Nov 26 14:42:58 crc kubenswrapper[5037]: I1126 14:42:58.572315 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz" event={"ID":"257e4b94-6b37-4243-8e8a-6bd47f0a5603","Type":"ContainerStarted","Data":"084147140f433c529e6be96361e0c147011e55c6ffe26e746fd701df366832bd"}
Nov 26 14:42:58 crc kubenswrapper[5037]: I1126 14:42:58.573579 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Nov 26 14:42:58 crc kubenswrapper[5037]: I1126 14:42:58.577500 5037 generic.go:334] "Generic (PLEG): container finished" podID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerID="f148d4c9904f2422f82cba5daa769dab150bbc012402b51083b120ea10222f71" exitCode=0
Nov 26 14:42:58 crc kubenswrapper[5037]: I1126 14:42:58.577539 5037 generic.go:334] "Generic (PLEG): container finished" podID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerID="c8951bc9d395a7d7c6bc798a3590c0f41acfaec0d2abeb3f57d578b0817ef5bf" exitCode=2
Nov 26 14:42:58 crc kubenswrapper[5037]: I1126 14:42:58.577551 5037 generic.go:334] "Generic (PLEG): container finished" podID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerID="9b060f5248816d25c9577fffedca050a87900c7a204b7a345af98597b8fe0684" exitCode=0
Nov 26 14:42:58 crc kubenswrapper[5037]: I1126 14:42:58.577598 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556","Type":"ContainerDied","Data":"f148d4c9904f2422f82cba5daa769dab150bbc012402b51083b120ea10222f71"}
Nov 26 14:42:58 crc kubenswrapper[5037]: I1126 14:42:58.577625 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556","Type":"ContainerDied","Data":"c8951bc9d395a7d7c6bc798a3590c0f41acfaec0d2abeb3f57d578b0817ef5bf"}
Nov 26 14:42:58 crc kubenswrapper[5037]: I1126 14:42:58.577662 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556","Type":"ContainerDied","Data":"9b060f5248816d25c9577fffedca050a87900c7a204b7a345af98597b8fe0684"}
Nov 26 14:42:58 crc kubenswrapper[5037]: I1126 14:42:58.605530 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz" podStartSLOduration=3.605505748 podStartE2EDuration="3.605505748s" podCreationTimestamp="2025-11-26 14:42:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:42:58.599744768 +0000 UTC m=+1645.396514952" watchObservedRunningTime="2025-11-26 14:42:58.605505748 +0000 UTC m=+1645.402275942"
Nov 26 14:42:58 crc kubenswrapper[5037]: I1126 14:42:58.967613 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 26 14:42:58 crc kubenswrapper[5037]: I1126 14:42:58.968115 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 26 14:42:58 crc kubenswrapper[5037]: I1126 14:42:58.981087 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 14:42:59 crc kubenswrapper[5037]: I1126 14:42:59.257118 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 26 14:42:59 crc kubenswrapper[5037]: I1126 14:42:59.257352 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" containerName="nova-api-log" containerID="cri-o://8a0101817e876e40f7c47968b34dd4a562c0b54f491d0c59c2b95fbc3fb2d558" gracePeriod=30
Nov 26 14:42:59 crc kubenswrapper[5037]: I1126 14:42:59.257461 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" containerName="nova-api-api" containerID="cri-o://27d186b91dbc239c18433f71264b7bead5b151ee8f447e7f0c53934638e1af3b" gracePeriod=30
Nov 26 14:42:59 crc kubenswrapper[5037]: I1126 14:42:59.588842 5037 generic.go:334] "Generic (PLEG): container finished" podID="dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" containerID="8a0101817e876e40f7c47968b34dd4a562c0b54f491d0c59c2b95fbc3fb2d558" exitCode=143
Nov 26 14:42:59 crc kubenswrapper[5037]: I1126 14:42:59.588931 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5","Type":"ContainerDied","Data":"8a0101817e876e40f7c47968b34dd4a562c0b54f491d0c59c2b95fbc3fb2d558"}
Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.597996 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.630676 5037 generic.go:334] "Generic (PLEG): container finished" podID="dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" containerID="27d186b91dbc239c18433f71264b7bead5b151ee8f447e7f0c53934638e1af3b" exitCode=0
Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.630731 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5","Type":"ContainerDied","Data":"27d186b91dbc239c18433f71264b7bead5b151ee8f447e7f0c53934638e1af3b"}
Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.639038 5037 generic.go:334] "Generic (PLEG): container finished" podID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerID="4dfef52a4228c6ba6aa50763b516a103d58e8049f0dafa5ac0317240ca27827e" exitCode=0
Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.639072 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556","Type":"ContainerDied","Data":"4dfef52a4228c6ba6aa50763b516a103d58e8049f0dafa5ac0317240ca27827e"}
Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.639096 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556","Type":"ContainerDied","Data":"2f3f7e34bbab6e1ae1e6ec76586e4a34e52130932b59701e0056978c4d0ae4b4"}
Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.639111 5037 scope.go:117] "RemoveContainer" containerID="f148d4c9904f2422f82cba5daa769dab150bbc012402b51083b120ea10222f71"
Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.639217 5037 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.652668 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-ceilometer-tls-certs\") pod \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.652763 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-run-httpd\") pod \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.652797 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-combined-ca-bundle\") pod \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.652856 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-log-httpd\") pod \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.652904 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-scripts\") pod \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.652930 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-sg-core-conf-yaml\") pod \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.652969 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctsjg\" (UniqueName: \"kubernetes.io/projected/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-kube-api-access-ctsjg\") pod \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.653062 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-config-data\") pod \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\" (UID: \"fdc28ac8-0a1f-4cee-9f21-a9eb621b5556\") " Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.653736 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" (UID: "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.653940 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" (UID: "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.672678 5037 scope.go:117] "RemoveContainer" containerID="c8951bc9d395a7d7c6bc798a3590c0f41acfaec0d2abeb3f57d578b0817ef5bf" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.677492 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-scripts" (OuterVolumeSpecName: "scripts") pod "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" (UID: "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.678478 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-kube-api-access-ctsjg" (OuterVolumeSpecName: "kube-api-access-ctsjg") pod "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" (UID: "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556"). InnerVolumeSpecName "kube-api-access-ctsjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:43:02 crc kubenswrapper[5037]: E1126 14:43:02.704721 5037 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddbf1a5a7_66b5_40ec_95cd_8e91c8822cc5.slice/crio-conmon-27d186b91dbc239c18433f71264b7bead5b151ee8f447e7f0c53934638e1af3b.scope\": RecentStats: unable to find data in memory cache]" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.721615 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" (UID: "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.736548 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" (UID: "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.755370 5037 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.755502 5037 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.755560 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.755614 5037 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.755687 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctsjg\" (UniqueName: \"kubernetes.io/projected/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-kube-api-access-ctsjg\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.755747 5037 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.765175 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" (UID: "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.774471 5037 scope.go:117] "RemoveContainer" containerID="4dfef52a4228c6ba6aa50763b516a103d58e8049f0dafa5ac0317240ca27827e" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.796897 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.798939 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-config-data" (OuterVolumeSpecName: "config-data") pod "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" (UID: "fdc28ac8-0a1f-4cee-9f21-a9eb621b5556"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.808462 5037 scope.go:117] "RemoveContainer" containerID="9b060f5248816d25c9577fffedca050a87900c7a204b7a345af98597b8fe0684" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.856680 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pl22v\" (UniqueName: \"kubernetes.io/projected/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-kube-api-access-pl22v\") pod \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.856840 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-config-data\") pod \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.856901 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-logs\") pod \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.856925 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-combined-ca-bundle\") pod \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\" (UID: \"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5\") " Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.857279 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.857307 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.857491 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-logs" (OuterVolumeSpecName: "logs") pod "dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" (UID: "dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.860008 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-kube-api-access-pl22v" (OuterVolumeSpecName: "kube-api-access-pl22v") pod "dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" (UID: "dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5"). InnerVolumeSpecName "kube-api-access-pl22v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.867867 5037 scope.go:117] "RemoveContainer" containerID="f148d4c9904f2422f82cba5daa769dab150bbc012402b51083b120ea10222f71" Nov 26 14:43:02 crc kubenswrapper[5037]: E1126 14:43:02.874431 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f148d4c9904f2422f82cba5daa769dab150bbc012402b51083b120ea10222f71\": container with ID starting with f148d4c9904f2422f82cba5daa769dab150bbc012402b51083b120ea10222f71 not found: ID does not exist" containerID="f148d4c9904f2422f82cba5daa769dab150bbc012402b51083b120ea10222f71" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.874473 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f148d4c9904f2422f82cba5daa769dab150bbc012402b51083b120ea10222f71"} err="failed to get container status \"f148d4c9904f2422f82cba5daa769dab150bbc012402b51083b120ea10222f71\": rpc error: code = NotFound desc = could not find container \"f148d4c9904f2422f82cba5daa769dab150bbc012402b51083b120ea10222f71\": container with ID starting with f148d4c9904f2422f82cba5daa769dab150bbc012402b51083b120ea10222f71 not found: ID does not exist" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.874499 5037 scope.go:117] "RemoveContainer" containerID="c8951bc9d395a7d7c6bc798a3590c0f41acfaec0d2abeb3f57d578b0817ef5bf" Nov 26 14:43:02 crc kubenswrapper[5037]: E1126 14:43:02.874862 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8951bc9d395a7d7c6bc798a3590c0f41acfaec0d2abeb3f57d578b0817ef5bf\": container with ID starting with c8951bc9d395a7d7c6bc798a3590c0f41acfaec0d2abeb3f57d578b0817ef5bf not found: ID does not exist" containerID="c8951bc9d395a7d7c6bc798a3590c0f41acfaec0d2abeb3f57d578b0817ef5bf" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.874898 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8951bc9d395a7d7c6bc798a3590c0f41acfaec0d2abeb3f57d578b0817ef5bf"} err="failed to get container status \"c8951bc9d395a7d7c6bc798a3590c0f41acfaec0d2abeb3f57d578b0817ef5bf\": rpc error: code = NotFound desc = could not find container \"c8951bc9d395a7d7c6bc798a3590c0f41acfaec0d2abeb3f57d578b0817ef5bf\": container with ID starting with c8951bc9d395a7d7c6bc798a3590c0f41acfaec0d2abeb3f57d578b0817ef5bf not found: ID does not exist" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.874922 5037 scope.go:117] "RemoveContainer" containerID="4dfef52a4228c6ba6aa50763b516a103d58e8049f0dafa5ac0317240ca27827e" Nov 26 14:43:02 crc kubenswrapper[5037]: E1126 14:43:02.875199 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4dfef52a4228c6ba6aa50763b516a103d58e8049f0dafa5ac0317240ca27827e\": container with ID starting with 4dfef52a4228c6ba6aa50763b516a103d58e8049f0dafa5ac0317240ca27827e not found: ID does not exist" containerID="4dfef52a4228c6ba6aa50763b516a103d58e8049f0dafa5ac0317240ca27827e" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.875256 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4dfef52a4228c6ba6aa50763b516a103d58e8049f0dafa5ac0317240ca27827e"} err="failed to get container status \"4dfef52a4228c6ba6aa50763b516a103d58e8049f0dafa5ac0317240ca27827e\": rpc error: code = NotFound desc = could not 
find container \"4dfef52a4228c6ba6aa50763b516a103d58e8049f0dafa5ac0317240ca27827e\": container with ID starting with 4dfef52a4228c6ba6aa50763b516a103d58e8049f0dafa5ac0317240ca27827e not found: ID does not exist" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.875306 5037 scope.go:117] "RemoveContainer" containerID="9b060f5248816d25c9577fffedca050a87900c7a204b7a345af98597b8fe0684" Nov 26 14:43:02 crc kubenswrapper[5037]: E1126 14:43:02.875546 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b060f5248816d25c9577fffedca050a87900c7a204b7a345af98597b8fe0684\": container with ID starting with 9b060f5248816d25c9577fffedca050a87900c7a204b7a345af98597b8fe0684 not found: ID does not exist" containerID="9b060f5248816d25c9577fffedca050a87900c7a204b7a345af98597b8fe0684" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.875574 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b060f5248816d25c9577fffedca050a87900c7a204b7a345af98597b8fe0684"} err="failed to get container status \"9b060f5248816d25c9577fffedca050a87900c7a204b7a345af98597b8fe0684\": rpc error: code = NotFound desc = could not find container \"9b060f5248816d25c9577fffedca050a87900c7a204b7a345af98597b8fe0684\": container with ID starting with 9b060f5248816d25c9577fffedca050a87900c7a204b7a345af98597b8fe0684 not found: ID does not exist" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.886885 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" (UID: "dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.897854 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-config-data" (OuterVolumeSpecName: "config-data") pod "dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" (UID: "dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.958833 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.959028 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.959090 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.959157 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pl22v\" (UniqueName: \"kubernetes.io/projected/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5-kube-api-access-pl22v\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.980406 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:43:02 crc kubenswrapper[5037]: I1126 14:43:02.989485 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.013777 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:43:03 crc kubenswrapper[5037]: E1126 14:43:03.014126 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="ceilometer-notification-agent" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.014138 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="ceilometer-notification-agent" Nov 26 14:43:03 crc kubenswrapper[5037]: E1126 14:43:03.014162 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="proxy-httpd" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.014170 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="proxy-httpd" Nov 26 14:43:03 crc kubenswrapper[5037]: E1126 14:43:03.014184 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="ceilometer-central-agent" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.014190 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="ceilometer-central-agent" Nov 26 14:43:03 crc kubenswrapper[5037]: E1126 14:43:03.014202 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="sg-core" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.014208 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="sg-core" Nov 26 14:43:03 crc kubenswrapper[5037]: E1126 14:43:03.014226 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" containerName="nova-api-log" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.014231 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" 
containerName="nova-api-log" Nov 26 14:43:03 crc kubenswrapper[5037]: E1126 14:43:03.014249 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" containerName="nova-api-api" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.014255 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" containerName="nova-api-api" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.014493 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="ceilometer-central-agent" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.014515 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" containerName="nova-api-api" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.014527 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="sg-core" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.014541 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="proxy-httpd" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.014556 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" containerName="ceilometer-notification-agent" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.014567 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" containerName="nova-api-log" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.016548 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.018092 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.019033 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.019210 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.028123 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.061166 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b90229c-2a39-4627-896f-9c1b27e4f1d5-log-httpd\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.061231 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-config-data\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.061264 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " 
pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.061314 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b90229c-2a39-4627-896f-9c1b27e4f1d5-run-httpd\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.061336 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.061351 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-scripts\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.061367 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbbxj\" (UniqueName: \"kubernetes.io/projected/7b90229c-2a39-4627-896f-9c1b27e4f1d5-kube-api-access-xbbxj\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.061436 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.163471 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b90229c-2a39-4627-896f-9c1b27e4f1d5-run-httpd\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.163550 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.163570 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-scripts\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.163594 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbbxj\" (UniqueName: \"kubernetes.io/projected/7b90229c-2a39-4627-896f-9c1b27e4f1d5-kube-api-access-xbbxj\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.163696 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.164076 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b90229c-2a39-4627-896f-9c1b27e4f1d5-log-httpd\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.164101 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b90229c-2a39-4627-896f-9c1b27e4f1d5-run-httpd\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.164172 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-config-data\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.164481 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b90229c-2a39-4627-896f-9c1b27e4f1d5-log-httpd\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.164587 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.170056 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.170264 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-config-data\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.170722 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.174362 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.179073 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-scripts\") pod 
\"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.187128 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbbxj\" (UniqueName: \"kubernetes.io/projected/7b90229c-2a39-4627-896f-9c1b27e4f1d5-kube-api-access-xbbxj\") pod \"ceilometer-0\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.341246 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.609896 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.651515 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b90229c-2a39-4627-896f-9c1b27e4f1d5","Type":"ContainerStarted","Data":"b48ea63d29bf52c4bb1b9d55f2d538dc9d9558b373eafb10bbb2f277e711a5e2"} Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.653629 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5","Type":"ContainerDied","Data":"be108884803bb557e8adf398c23206c72cc3c512672dba77b21bb6a31e2cce4b"} Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.653664 5037 scope.go:117] "RemoveContainer" containerID="27d186b91dbc239c18433f71264b7bead5b151ee8f447e7f0c53934638e1af3b" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.653825 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.705888 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.709473 5037 scope.go:117] "RemoveContainer" containerID="8a0101817e876e40f7c47968b34dd4a562c0b54f491d0c59c2b95fbc3fb2d558" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.715721 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.729079 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.730915 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.738436 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.738454 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.738502 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.743735 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.796516 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74fa2905-fe61-4b29-a77f-b388199afd56-logs\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.796611 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-public-tls-certs\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.796668 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.796696 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-internal-tls-certs\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.796717 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r54g\" (UniqueName: \"kubernetes.io/projected/74fa2905-fe61-4b29-a77f-b388199afd56-kube-api-access-9r54g\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.796735 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-config-data\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.898314 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-public-tls-certs\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.898418 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-combined-ca-bundle\") pod 
\"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.898456 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-internal-tls-certs\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.898482 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r54g\" (UniqueName: \"kubernetes.io/projected/74fa2905-fe61-4b29-a77f-b388199afd56-kube-api-access-9r54g\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.898501 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-config-data\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.898524 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74fa2905-fe61-4b29-a77f-b388199afd56-logs\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.899085 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74fa2905-fe61-4b29-a77f-b388199afd56-logs\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.904755 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-public-tls-certs\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.904871 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-config-data\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.904903 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-internal-tls-certs\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.906863 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.916750 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9r54g\" (UniqueName: \"kubernetes.io/projected/74fa2905-fe61-4b29-a77f-b388199afd56-kube-api-access-9r54g\") pod \"nova-api-0\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " 
pod="openstack/nova-api-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.920975 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5" path="/var/lib/kubelet/pods/dbf1a5a7-66b5-40ec-95cd-8e91c8822cc5/volumes" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.921687 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fdc28ac8-0a1f-4cee-9f21-a9eb621b5556" path="/var/lib/kubelet/pods/fdc28ac8-0a1f-4cee-9f21-a9eb621b5556/volumes" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.966924 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.966963 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.981427 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:43:03 crc kubenswrapper[5037]: I1126 14:43:03.999557 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.061115 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.532994 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:43:04 crc kubenswrapper[5037]: W1126 14:43:04.543056 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74fa2905_fe61_4b29_a77f_b388199afd56.slice/crio-fc6f0dd8361442b18687f801cf4e77bcdec230add5e03ff56f295891c6713768 WatchSource:0}: Error finding container fc6f0dd8361442b18687f801cf4e77bcdec230add5e03ff56f295891c6713768: Status 404 returned error can't find the container with id fc6f0dd8361442b18687f801cf4e77bcdec230add5e03ff56f295891c6713768 Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.669682 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"74fa2905-fe61-4b29-a77f-b388199afd56","Type":"ContainerStarted","Data":"fc6f0dd8361442b18687f801cf4e77bcdec230add5e03ff56f295891c6713768"} Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.671107 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b90229c-2a39-4627-896f-9c1b27e4f1d5","Type":"ContainerStarted","Data":"d9e7e3cabf68f8c77fe540ee66229c4f639270ea37015bbb512cd6402e09b909"} Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.690392 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.862215 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-5n698"] Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.863498 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.866850 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.866984 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.889254 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-5n698"] Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.932580 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-scripts\") pod \"nova-cell1-cell-mapping-5n698\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.932776 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n952\" (UniqueName: \"kubernetes.io/projected/a48383c9-d4b8-4b6d-8809-3be689c45803-kube-api-access-2n952\") pod \"nova-cell1-cell-mapping-5n698\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.932908 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-config-data\") pod \"nova-cell1-cell-mapping-5n698\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.933006 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-5n698\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.984520 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 14:43:04 crc kubenswrapper[5037]: I1126 14:43:04.984835 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.035607 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2n952\" (UniqueName: \"kubernetes.io/projected/a48383c9-d4b8-4b6d-8809-3be689c45803-kube-api-access-2n952\") pod \"nova-cell1-cell-mapping-5n698\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.035949 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-config-data\") pod \"nova-cell1-cell-mapping-5n698\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.035987 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-5n698\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.036072 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-scripts\") pod \"nova-cell1-cell-mapping-5n698\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.039967 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-scripts\") pod \"nova-cell1-cell-mapping-5n698\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.045591 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-5n698\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.049322 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-config-data\") pod \"nova-cell1-cell-mapping-5n698\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.060429 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n952\" (UniqueName: \"kubernetes.io/projected/a48383c9-d4b8-4b6d-8809-3be689c45803-kube-api-access-2n952\") pod \"nova-cell1-cell-mapping-5n698\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.190412 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.688644 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-5n698"] Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.695172 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"74fa2905-fe61-4b29-a77f-b388199afd56","Type":"ContainerStarted","Data":"254fed9a6c7832cd9c31c9fa91219866f069537254a0a25f6027823b0cde8c85"} Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.695221 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"74fa2905-fe61-4b29-a77f-b388199afd56","Type":"ContainerStarted","Data":"81412ce1a09f43f764475b0e6b8620173942a665fb9746d68f021aa83a2d3888"} Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.720208 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b90229c-2a39-4627-896f-9c1b27e4f1d5","Type":"ContainerStarted","Data":"71d63cfc921e9e46b869f583ba1be1fd0b73e384b0a9b8c0e83735a75f13ecda"} Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.743442 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.743419851 podStartE2EDuration="2.743419851s" podCreationTimestamp="2025-11-26 14:43:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:43:05.735719713 +0000 UTC m=+1652.532489897" watchObservedRunningTime="2025-11-26 14:43:05.743419851 +0000 UTC m=+1652.540190035" Nov 26 14:43:05 crc kubenswrapper[5037]: I1126 14:43:05.908241 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:43:05 crc kubenswrapper[5037]: E1126 14:43:05.908514 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.077878 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz" Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.136493 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-xfcds"] Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.136831 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" podUID="faa2f8fc-e417-40ea-bd09-280b79a99548" containerName="dnsmasq-dns" containerID="cri-o://7543287d626875e96b7fd10badbb09ba29d86483c98c340761eb4eeae930d754" gracePeriod=10 Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.658772 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.760553 5037 generic.go:334] "Generic (PLEG): container finished" podID="faa2f8fc-e417-40ea-bd09-280b79a99548" containerID="7543287d626875e96b7fd10badbb09ba29d86483c98c340761eb4eeae930d754" exitCode=0 Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.760857 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.760865 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" event={"ID":"faa2f8fc-e417-40ea-bd09-280b79a99548","Type":"ContainerDied","Data":"7543287d626875e96b7fd10badbb09ba29d86483c98c340761eb4eeae930d754"} Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.761186 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-xfcds" event={"ID":"faa2f8fc-e417-40ea-bd09-280b79a99548","Type":"ContainerDied","Data":"443ba2ac3c6645f0e65cb70f28f0ba9e4fc5e6808de9467617c1e70f5d13e141"} Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.761212 5037 scope.go:117] "RemoveContainer" containerID="7543287d626875e96b7fd10badbb09ba29d86483c98c340761eb4eeae930d754" Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.772839 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-ovsdbserver-sb\") pod \"faa2f8fc-e417-40ea-bd09-280b79a99548\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.772947 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-ovsdbserver-nb\") pod \"faa2f8fc-e417-40ea-bd09-280b79a99548\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.773015 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-config\") pod \"faa2f8fc-e417-40ea-bd09-280b79a99548\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.773068 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-dns-svc\") pod \"faa2f8fc-e417-40ea-bd09-280b79a99548\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.773151 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-dns-swift-storage-0\") pod \"faa2f8fc-e417-40ea-bd09-280b79a99548\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.773198 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psdj5\" (UniqueName: \"kubernetes.io/projected/faa2f8fc-e417-40ea-bd09-280b79a99548-kube-api-access-psdj5\") pod \"faa2f8fc-e417-40ea-bd09-280b79a99548\" (UID: \"faa2f8fc-e417-40ea-bd09-280b79a99548\") " Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.796125 5037 operation_generator.go:803] 
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.809231 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b90229c-2a39-4627-896f-9c1b27e4f1d5","Type":"ContainerStarted","Data":"e755b8c60d9bc3fba924bee940809c862f89fc6885ca06dec0c4232e6e6116ba"}
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.826000 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5n698" event={"ID":"a48383c9-d4b8-4b6d-8809-3be689c45803","Type":"ContainerStarted","Data":"ed4a7f81c0c4d4bec4821337a11a3efc648b67a00e6372d401c2ff6c7c2b75e3"}
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.826071 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5n698" event={"ID":"a48383c9-d4b8-4b6d-8809-3be689c45803","Type":"ContainerStarted","Data":"e1b6a4fab5102858d9555e1129850d38316402811e0cece4603899d64a73169e"}
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.844368 5037 scope.go:117] "RemoveContainer" containerID="acb7120d538d04bcd14b10d871a54e95a0fdf3529c741deb7e0d73851f6d275d"
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.850941 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-5n698" podStartSLOduration=2.850924363 podStartE2EDuration="2.850924363s" podCreationTimestamp="2025-11-26 14:43:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:43:06.844223559 +0000 UTC m=+1653.640993743" watchObservedRunningTime="2025-11-26 14:43:06.850924363 +0000 UTC m=+1653.647694547"
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.861925 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "faa2f8fc-e417-40ea-bd09-280b79a99548" (UID: "faa2f8fc-e417-40ea-bd09-280b79a99548"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.875962 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psdj5\" (UniqueName: \"kubernetes.io/projected/faa2f8fc-e417-40ea-bd09-280b79a99548-kube-api-access-psdj5\") on node \"crc\" DevicePath \"\""
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.875997 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.879237 5037 scope.go:117] "RemoveContainer" containerID="7543287d626875e96b7fd10badbb09ba29d86483c98c340761eb4eeae930d754"
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.879663 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "faa2f8fc-e417-40ea-bd09-280b79a99548" (UID: "faa2f8fc-e417-40ea-bd09-280b79a99548"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:43:06 crc kubenswrapper[5037]: E1126 14:43:06.879662 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7543287d626875e96b7fd10badbb09ba29d86483c98c340761eb4eeae930d754\": container with ID starting with 7543287d626875e96b7fd10badbb09ba29d86483c98c340761eb4eeae930d754 not found: ID does not exist" containerID="7543287d626875e96b7fd10badbb09ba29d86483c98c340761eb4eeae930d754"
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.879710 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7543287d626875e96b7fd10badbb09ba29d86483c98c340761eb4eeae930d754"} err="failed to get container status \"7543287d626875e96b7fd10badbb09ba29d86483c98c340761eb4eeae930d754\": rpc error: code = NotFound desc = could not find container \"7543287d626875e96b7fd10badbb09ba29d86483c98c340761eb4eeae930d754\": container with ID starting with 7543287d626875e96b7fd10badbb09ba29d86483c98c340761eb4eeae930d754 not found: ID does not exist"
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.879730 5037 scope.go:117] "RemoveContainer" containerID="acb7120d538d04bcd14b10d871a54e95a0fdf3529c741deb7e0d73851f6d275d"
Nov 26 14:43:06 crc kubenswrapper[5037]: E1126 14:43:06.879945 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acb7120d538d04bcd14b10d871a54e95a0fdf3529c741deb7e0d73851f6d275d\": container with ID starting with acb7120d538d04bcd14b10d871a54e95a0fdf3529c741deb7e0d73851f6d275d not found: ID does not exist" containerID="acb7120d538d04bcd14b10d871a54e95a0fdf3529c741deb7e0d73851f6d275d"
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.879979 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acb7120d538d04bcd14b10d871a54e95a0fdf3529c741deb7e0d73851f6d275d"} err="failed to get container status \"acb7120d538d04bcd14b10d871a54e95a0fdf3529c741deb7e0d73851f6d275d\": rpc error: code = NotFound desc = could not find container \"acb7120d538d04bcd14b10d871a54e95a0fdf3529c741deb7e0d73851f6d275d\": container with ID starting with acb7120d538d04bcd14b10d871a54e95a0fdf3529c741deb7e0d73851f6d275d not found: ID does not exist"
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.890679 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-config" (OuterVolumeSpecName: "config") pod "faa2f8fc-e417-40ea-bd09-280b79a99548" (UID: "faa2f8fc-e417-40ea-bd09-280b79a99548"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.897130 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "faa2f8fc-e417-40ea-bd09-280b79a99548" (UID: "faa2f8fc-e417-40ea-bd09-280b79a99548"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.905741 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "faa2f8fc-e417-40ea-bd09-280b79a99548" (UID: "faa2f8fc-e417-40ea-bd09-280b79a99548"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.977281 5037 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.977330 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.977340 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:06 crc kubenswrapper[5037]: I1126 14:43:06.977350 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/faa2f8fc-e417-40ea-bd09-280b79a99548-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:07 crc kubenswrapper[5037]: I1126 14:43:07.098032 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-xfcds"] Nov 26 14:43:07 crc kubenswrapper[5037]: I1126 14:43:07.106969 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-xfcds"] Nov 26 14:43:07 crc kubenswrapper[5037]: I1126 14:43:07.918368 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="faa2f8fc-e417-40ea-bd09-280b79a99548" path="/var/lib/kubelet/pods/faa2f8fc-e417-40ea-bd09-280b79a99548/volumes" Nov 26 14:43:08 crc kubenswrapper[5037]: I1126 14:43:08.857085 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b90229c-2a39-4627-896f-9c1b27e4f1d5","Type":"ContainerStarted","Data":"4b66f0aa9cd359a08c2fc701fbd668ab4e74119711476d65846830c7024d146e"} Nov 26 14:43:08 crc kubenswrapper[5037]: I1126 14:43:08.857798 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 14:43:08 crc kubenswrapper[5037]: I1126 14:43:08.884584 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.807564388 podStartE2EDuration="6.88456261s" podCreationTimestamp="2025-11-26 14:43:02 +0000 UTC" firstStartedPulling="2025-11-26 14:43:03.614084098 +0000 UTC m=+1650.410854282" lastFinishedPulling="2025-11-26 14:43:07.69108232 +0000 UTC m=+1654.487852504" observedRunningTime="2025-11-26 14:43:08.881338731 +0000 UTC m=+1655.678108915" watchObservedRunningTime="2025-11-26 14:43:08.88456261 +0000 UTC m=+1655.681332794" Nov 26 14:43:11 crc kubenswrapper[5037]: I1126 14:43:11.885776 5037 generic.go:334] "Generic (PLEG): container finished" podID="a48383c9-d4b8-4b6d-8809-3be689c45803" containerID="ed4a7f81c0c4d4bec4821337a11a3efc648b67a00e6372d401c2ff6c7c2b75e3" exitCode=0 Nov 26 14:43:11 crc kubenswrapper[5037]: I1126 14:43:11.885864 5037 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5n698" event={"ID":"a48383c9-d4b8-4b6d-8809-3be689c45803","Type":"ContainerDied","Data":"ed4a7f81c0c4d4bec4821337a11a3efc648b67a00e6372d401c2ff6c7c2b75e3"} Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.285158 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.437790 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-config-data\") pod \"a48383c9-d4b8-4b6d-8809-3be689c45803\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.437956 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2n952\" (UniqueName: \"kubernetes.io/projected/a48383c9-d4b8-4b6d-8809-3be689c45803-kube-api-access-2n952\") pod \"a48383c9-d4b8-4b6d-8809-3be689c45803\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.438078 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-combined-ca-bundle\") pod \"a48383c9-d4b8-4b6d-8809-3be689c45803\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.438108 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-scripts\") pod \"a48383c9-d4b8-4b6d-8809-3be689c45803\" (UID: \"a48383c9-d4b8-4b6d-8809-3be689c45803\") " Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.443558 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a48383c9-d4b8-4b6d-8809-3be689c45803-kube-api-access-2n952" (OuterVolumeSpecName: "kube-api-access-2n952") pod "a48383c9-d4b8-4b6d-8809-3be689c45803" (UID: "a48383c9-d4b8-4b6d-8809-3be689c45803"). InnerVolumeSpecName "kube-api-access-2n952". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.451748 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-scripts" (OuterVolumeSpecName: "scripts") pod "a48383c9-d4b8-4b6d-8809-3be689c45803" (UID: "a48383c9-d4b8-4b6d-8809-3be689c45803"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.466021 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-config-data" (OuterVolumeSpecName: "config-data") pod "a48383c9-d4b8-4b6d-8809-3be689c45803" (UID: "a48383c9-d4b8-4b6d-8809-3be689c45803"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.476846 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a48383c9-d4b8-4b6d-8809-3be689c45803" (UID: "a48383c9-d4b8-4b6d-8809-3be689c45803"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.540670 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2n952\" (UniqueName: \"kubernetes.io/projected/a48383c9-d4b8-4b6d-8809-3be689c45803-kube-api-access-2n952\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.540716 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.540730 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.540743 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a48383c9-d4b8-4b6d-8809-3be689c45803-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.946857 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5n698" event={"ID":"a48383c9-d4b8-4b6d-8809-3be689c45803","Type":"ContainerDied","Data":"e1b6a4fab5102858d9555e1129850d38316402811e0cece4603899d64a73169e"} Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.946912 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1b6a4fab5102858d9555e1129850d38316402811e0cece4603899d64a73169e" Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.946941 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5n698" Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.977445 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.979699 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 14:43:13 crc kubenswrapper[5037]: I1126 14:43:13.988912 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 14:43:14 crc kubenswrapper[5037]: I1126 14:43:14.062437 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 14:43:14 crc kubenswrapper[5037]: I1126 14:43:14.062509 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 14:43:14 crc kubenswrapper[5037]: I1126 14:43:14.118615 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:43:14 crc kubenswrapper[5037]: I1126 14:43:14.153788 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:43:14 crc kubenswrapper[5037]: I1126 14:43:14.167452 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:43:14 crc kubenswrapper[5037]: I1126 14:43:14.167693 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="55121b7f-eaf1-4a1c-a5e0-47e38fad69cd" containerName="nova-scheduler-scheduler" containerID="cri-o://3a19d95d8c512c752245f45bd86e82d06ece7dca89b7728bb08c93a55df41ed3" gracePeriod=30 Nov 26 14:43:14 crc kubenswrapper[5037]: I1126 14:43:14.966872 
5037 generic.go:334] "Generic (PLEG): container finished" podID="55121b7f-eaf1-4a1c-a5e0-47e38fad69cd" containerID="3a19d95d8c512c752245f45bd86e82d06ece7dca89b7728bb08c93a55df41ed3" exitCode=0 Nov 26 14:43:14 crc kubenswrapper[5037]: I1126 14:43:14.967435 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="74fa2905-fe61-4b29-a77f-b388199afd56" containerName="nova-api-log" containerID="cri-o://81412ce1a09f43f764475b0e6b8620173942a665fb9746d68f021aa83a2d3888" gracePeriod=30 Nov 26 14:43:14 crc kubenswrapper[5037]: I1126 14:43:14.966943 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd","Type":"ContainerDied","Data":"3a19d95d8c512c752245f45bd86e82d06ece7dca89b7728bb08c93a55df41ed3"} Nov 26 14:43:14 crc kubenswrapper[5037]: I1126 14:43:14.968306 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="74fa2905-fe61-4b29-a77f-b388199afd56" containerName="nova-api-api" containerID="cri-o://254fed9a6c7832cd9c31c9fa91219866f069537254a0a25f6027823b0cde8c85" gracePeriod=30 Nov 26 14:43:14 crc kubenswrapper[5037]: I1126 14:43:14.993423 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="74fa2905-fe61-4b29-a77f-b388199afd56" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.195:8774/\": EOF" Nov 26 14:43:14 crc kubenswrapper[5037]: I1126 14:43:14.993745 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="74fa2905-fe61-4b29-a77f-b388199afd56" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.195:8774/\": EOF" Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.052807 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.478261 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.577661 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmw2j\" (UniqueName: \"kubernetes.io/projected/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-kube-api-access-gmw2j\") pod \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\" (UID: \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\") " Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.577962 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-combined-ca-bundle\") pod \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\" (UID: \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\") " Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.578977 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-config-data\") pod \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\" (UID: \"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd\") " Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.592606 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-kube-api-access-gmw2j" (OuterVolumeSpecName: "kube-api-access-gmw2j") pod "55121b7f-eaf1-4a1c-a5e0-47e38fad69cd" (UID: "55121b7f-eaf1-4a1c-a5e0-47e38fad69cd"). 
InnerVolumeSpecName "kube-api-access-gmw2j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.607236 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55121b7f-eaf1-4a1c-a5e0-47e38fad69cd" (UID: "55121b7f-eaf1-4a1c-a5e0-47e38fad69cd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.615512 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-config-data" (OuterVolumeSpecName: "config-data") pod "55121b7f-eaf1-4a1c-a5e0-47e38fad69cd" (UID: "55121b7f-eaf1-4a1c-a5e0-47e38fad69cd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.681094 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmw2j\" (UniqueName: \"kubernetes.io/projected/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-kube-api-access-gmw2j\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.681132 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.681146 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.978747 5037 generic.go:334] "Generic (PLEG): container finished" podID="74fa2905-fe61-4b29-a77f-b388199afd56" containerID="81412ce1a09f43f764475b0e6b8620173942a665fb9746d68f021aa83a2d3888" exitCode=143 Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.978867 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"74fa2905-fe61-4b29-a77f-b388199afd56","Type":"ContainerDied","Data":"81412ce1a09f43f764475b0e6b8620173942a665fb9746d68f021aa83a2d3888"} Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.984982 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.984979 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"55121b7f-eaf1-4a1c-a5e0-47e38fad69cd","Type":"ContainerDied","Data":"9af1effdaaf9758f1bab39e17b3aa1a54845356e71139d5d2a54b787ab0e6a6d"} Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.985050 5037 scope.go:117] "RemoveContainer" containerID="3a19d95d8c512c752245f45bd86e82d06ece7dca89b7728bb08c93a55df41ed3" Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.985134 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerName="nova-metadata-log" containerID="cri-o://3f18a20df312ccff720b79feb2c26822f029e7d1990ee69dcd19116d22023de4" gracePeriod=30 Nov 26 14:43:15 crc kubenswrapper[5037]: I1126 14:43:15.985207 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerName="nova-metadata-metadata" containerID="cri-o://c6118d2fae4740b5164dbd3b99d85a64d2afc4b33c78c0221ff4c7e1f97aaf0f" gracePeriod=30 Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.024621 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.038248 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.054389 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:43:16 crc kubenswrapper[5037]: E1126 14:43:16.054850 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a48383c9-d4b8-4b6d-8809-3be689c45803" containerName="nova-manage" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.054869 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="a48383c9-d4b8-4b6d-8809-3be689c45803" containerName="nova-manage" Nov 26 14:43:16 crc kubenswrapper[5037]: E1126 14:43:16.054883 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55121b7f-eaf1-4a1c-a5e0-47e38fad69cd" containerName="nova-scheduler-scheduler" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.054890 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="55121b7f-eaf1-4a1c-a5e0-47e38fad69cd" containerName="nova-scheduler-scheduler" Nov 26 14:43:16 crc kubenswrapper[5037]: E1126 14:43:16.054909 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="faa2f8fc-e417-40ea-bd09-280b79a99548" containerName="dnsmasq-dns" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.054916 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="faa2f8fc-e417-40ea-bd09-280b79a99548" containerName="dnsmasq-dns" Nov 26 14:43:16 crc kubenswrapper[5037]: E1126 14:43:16.054936 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="faa2f8fc-e417-40ea-bd09-280b79a99548" containerName="init" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.054942 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="faa2f8fc-e417-40ea-bd09-280b79a99548" containerName="init" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.055238 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="a48383c9-d4b8-4b6d-8809-3be689c45803" containerName="nova-manage" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.055255 5037 
memory_manager.go:354] "RemoveStaleState removing state" podUID="faa2f8fc-e417-40ea-bd09-280b79a99548" containerName="dnsmasq-dns" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.055267 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="55121b7f-eaf1-4a1c-a5e0-47e38fad69cd" containerName="nova-scheduler-scheduler" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.056057 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.061610 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.066108 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.193725 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c3c49ff-cf53-4b5b-ba83-10877d499763-config-data\") pod \"nova-scheduler-0\" (UID: \"9c3c49ff-cf53-4b5b-ba83-10877d499763\") " pod="openstack/nova-scheduler-0" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.193803 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c3c49ff-cf53-4b5b-ba83-10877d499763-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9c3c49ff-cf53-4b5b-ba83-10877d499763\") " pod="openstack/nova-scheduler-0" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.194161 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9zrl\" (UniqueName: \"kubernetes.io/projected/9c3c49ff-cf53-4b5b-ba83-10877d499763-kube-api-access-d9zrl\") pod \"nova-scheduler-0\" (UID: \"9c3c49ff-cf53-4b5b-ba83-10877d499763\") " pod="openstack/nova-scheduler-0" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.296002 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c3c49ff-cf53-4b5b-ba83-10877d499763-config-data\") pod \"nova-scheduler-0\" (UID: \"9c3c49ff-cf53-4b5b-ba83-10877d499763\") " pod="openstack/nova-scheduler-0" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.296059 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c3c49ff-cf53-4b5b-ba83-10877d499763-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9c3c49ff-cf53-4b5b-ba83-10877d499763\") " pod="openstack/nova-scheduler-0" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.296109 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9zrl\" (UniqueName: \"kubernetes.io/projected/9c3c49ff-cf53-4b5b-ba83-10877d499763-kube-api-access-d9zrl\") pod \"nova-scheduler-0\" (UID: \"9c3c49ff-cf53-4b5b-ba83-10877d499763\") " pod="openstack/nova-scheduler-0" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.301023 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c3c49ff-cf53-4b5b-ba83-10877d499763-config-data\") pod \"nova-scheduler-0\" (UID: \"9c3c49ff-cf53-4b5b-ba83-10877d499763\") " pod="openstack/nova-scheduler-0" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.307824 5037 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c3c49ff-cf53-4b5b-ba83-10877d499763-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9c3c49ff-cf53-4b5b-ba83-10877d499763\") " pod="openstack/nova-scheduler-0" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.336509 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9zrl\" (UniqueName: \"kubernetes.io/projected/9c3c49ff-cf53-4b5b-ba83-10877d499763-kube-api-access-d9zrl\") pod \"nova-scheduler-0\" (UID: \"9c3c49ff-cf53-4b5b-ba83-10877d499763\") " pod="openstack/nova-scheduler-0" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.393979 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.837184 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:43:16 crc kubenswrapper[5037]: I1126 14:43:16.995635 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9c3c49ff-cf53-4b5b-ba83-10877d499763","Type":"ContainerStarted","Data":"38e70d5032c83e2af81154618fd9ef141af24eb0ab39c42a6c5f5f453ba0b9af"} Nov 26 14:43:17 crc kubenswrapper[5037]: I1126 14:43:17.000406 5037 generic.go:334] "Generic (PLEG): container finished" podID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerID="3f18a20df312ccff720b79feb2c26822f029e7d1990ee69dcd19116d22023de4" exitCode=143 Nov 26 14:43:17 crc kubenswrapper[5037]: I1126 14:43:17.000430 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"818cd85a-3db7-4a42-ac8e-5bf11c024493","Type":"ContainerDied","Data":"3f18a20df312ccff720b79feb2c26822f029e7d1990ee69dcd19116d22023de4"} Nov 26 14:43:17 crc kubenswrapper[5037]: I1126 14:43:17.908804 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:43:17 crc kubenswrapper[5037]: E1126 14:43:17.909589 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:43:17 crc kubenswrapper[5037]: I1126 14:43:17.920462 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55121b7f-eaf1-4a1c-a5e0-47e38fad69cd" path="/var/lib/kubelet/pods/55121b7f-eaf1-4a1c-a5e0-47e38fad69cd/volumes" Nov 26 14:43:18 crc kubenswrapper[5037]: I1126 14:43:18.012092 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9c3c49ff-cf53-4b5b-ba83-10877d499763","Type":"ContainerStarted","Data":"b37dad75bebb2e8ff92fd84a2c83e4b7a2ff235be32f88191ebf7baf5089d611"} Nov 26 14:43:18 crc kubenswrapper[5037]: I1126 14:43:18.034129 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.034106723 podStartE2EDuration="2.034106723s" podCreationTimestamp="2025-11-26 14:43:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:43:18.023902704 +0000 UTC m=+1664.820672888" 
watchObservedRunningTime="2025-11-26 14:43:18.034106723 +0000 UTC m=+1664.830876907" Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.144769 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": read tcp 10.217.0.2:32778->10.217.0.191:8775: read: connection reset by peer" Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.144823 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": read tcp 10.217.0.2:32776->10.217.0.191:8775: read: connection reset by peer" Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.564911 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.676882 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-config-data\") pod \"818cd85a-3db7-4a42-ac8e-5bf11c024493\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.677005 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gdb5c\" (UniqueName: \"kubernetes.io/projected/818cd85a-3db7-4a42-ac8e-5bf11c024493-kube-api-access-gdb5c\") pod \"818cd85a-3db7-4a42-ac8e-5bf11c024493\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.677042 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-combined-ca-bundle\") pod \"818cd85a-3db7-4a42-ac8e-5bf11c024493\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.677096 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-nova-metadata-tls-certs\") pod \"818cd85a-3db7-4a42-ac8e-5bf11c024493\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.677158 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/818cd85a-3db7-4a42-ac8e-5bf11c024493-logs\") pod \"818cd85a-3db7-4a42-ac8e-5bf11c024493\" (UID: \"818cd85a-3db7-4a42-ac8e-5bf11c024493\") " Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.677902 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/818cd85a-3db7-4a42-ac8e-5bf11c024493-logs" (OuterVolumeSpecName: "logs") pod "818cd85a-3db7-4a42-ac8e-5bf11c024493" (UID: "818cd85a-3db7-4a42-ac8e-5bf11c024493"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.682445 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/818cd85a-3db7-4a42-ac8e-5bf11c024493-kube-api-access-gdb5c" (OuterVolumeSpecName: "kube-api-access-gdb5c") pod "818cd85a-3db7-4a42-ac8e-5bf11c024493" (UID: "818cd85a-3db7-4a42-ac8e-5bf11c024493"). InnerVolumeSpecName "kube-api-access-gdb5c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.704040 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-config-data" (OuterVolumeSpecName: "config-data") pod "818cd85a-3db7-4a42-ac8e-5bf11c024493" (UID: "818cd85a-3db7-4a42-ac8e-5bf11c024493"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.733252 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "818cd85a-3db7-4a42-ac8e-5bf11c024493" (UID: "818cd85a-3db7-4a42-ac8e-5bf11c024493"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.740655 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "818cd85a-3db7-4a42-ac8e-5bf11c024493" (UID: "818cd85a-3db7-4a42-ac8e-5bf11c024493"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.779795 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/818cd85a-3db7-4a42-ac8e-5bf11c024493-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.780004 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.780075 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gdb5c\" (UniqueName: \"kubernetes.io/projected/818cd85a-3db7-4a42-ac8e-5bf11c024493-kube-api-access-gdb5c\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.780133 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:19 crc kubenswrapper[5037]: I1126 14:43:19.780205 5037 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/818cd85a-3db7-4a42-ac8e-5bf11c024493-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.036508 5037 generic.go:334] "Generic (PLEG): container finished" podID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerID="c6118d2fae4740b5164dbd3b99d85a64d2afc4b33c78c0221ff4c7e1f97aaf0f" exitCode=0 Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.036584 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.036580 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"818cd85a-3db7-4a42-ac8e-5bf11c024493","Type":"ContainerDied","Data":"c6118d2fae4740b5164dbd3b99d85a64d2afc4b33c78c0221ff4c7e1f97aaf0f"} Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.036984 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"818cd85a-3db7-4a42-ac8e-5bf11c024493","Type":"ContainerDied","Data":"cfb3dd29df00c0079de45a4f0ecd63155e8ed7fe67c57b9605c7f70080540920"} Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.037032 5037 scope.go:117] "RemoveContainer" containerID="c6118d2fae4740b5164dbd3b99d85a64d2afc4b33c78c0221ff4c7e1f97aaf0f" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.068475 5037 scope.go:117] "RemoveContainer" containerID="3f18a20df312ccff720b79feb2c26822f029e7d1990ee69dcd19116d22023de4" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.068827 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.084594 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.101670 5037 scope.go:117] "RemoveContainer" containerID="c6118d2fae4740b5164dbd3b99d85a64d2afc4b33c78c0221ff4c7e1f97aaf0f" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.102212 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:43:20 crc kubenswrapper[5037]: E1126 14:43:20.102481 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc 
error: code = NotFound desc = could not find container \"c6118d2fae4740b5164dbd3b99d85a64d2afc4b33c78c0221ff4c7e1f97aaf0f\": container with ID starting with c6118d2fae4740b5164dbd3b99d85a64d2afc4b33c78c0221ff4c7e1f97aaf0f not found: ID does not exist" containerID="c6118d2fae4740b5164dbd3b99d85a64d2afc4b33c78c0221ff4c7e1f97aaf0f" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.102628 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6118d2fae4740b5164dbd3b99d85a64d2afc4b33c78c0221ff4c7e1f97aaf0f"} err="failed to get container status \"c6118d2fae4740b5164dbd3b99d85a64d2afc4b33c78c0221ff4c7e1f97aaf0f\": rpc error: code = NotFound desc = could not find container \"c6118d2fae4740b5164dbd3b99d85a64d2afc4b33c78c0221ff4c7e1f97aaf0f\": container with ID starting with c6118d2fae4740b5164dbd3b99d85a64d2afc4b33c78c0221ff4c7e1f97aaf0f not found: ID does not exist" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.102755 5037 scope.go:117] "RemoveContainer" containerID="3f18a20df312ccff720b79feb2c26822f029e7d1990ee69dcd19116d22023de4" Nov 26 14:43:20 crc kubenswrapper[5037]: E1126 14:43:20.102718 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerName="nova-metadata-log" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.103023 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerName="nova-metadata-log" Nov 26 14:43:20 crc kubenswrapper[5037]: E1126 14:43:20.103256 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerName="nova-metadata-metadata" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.103403 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerName="nova-metadata-metadata" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.104066 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerName="nova-metadata-metadata" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.104211 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="818cd85a-3db7-4a42-ac8e-5bf11c024493" containerName="nova-metadata-log" Nov 26 14:43:20 crc kubenswrapper[5037]: E1126 14:43:20.104202 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f18a20df312ccff720b79feb2c26822f029e7d1990ee69dcd19116d22023de4\": container with ID starting with 3f18a20df312ccff720b79feb2c26822f029e7d1990ee69dcd19116d22023de4 not found: ID does not exist" containerID="3f18a20df312ccff720b79feb2c26822f029e7d1990ee69dcd19116d22023de4" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.104539 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f18a20df312ccff720b79feb2c26822f029e7d1990ee69dcd19116d22023de4"} err="failed to get container status \"3f18a20df312ccff720b79feb2c26822f029e7d1990ee69dcd19116d22023de4\": rpc error: code = NotFound desc = could not find container \"3f18a20df312ccff720b79feb2c26822f029e7d1990ee69dcd19116d22023de4\": container with ID starting with 3f18a20df312ccff720b79feb2c26822f029e7d1990ee69dcd19116d22023de4 not found: ID does not exist" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.106311 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.108783 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.108788 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.111533 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.292108 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-config-data\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.292200 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.292679 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.292854 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-logs\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.292916 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsmmv\" (UniqueName: \"kubernetes.io/projected/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-kube-api-access-rsmmv\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.395088 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-config-data\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.395177 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.395360 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " 
pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.395436 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-logs\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.395471 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsmmv\" (UniqueName: \"kubernetes.io/projected/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-kube-api-access-rsmmv\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.395886 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-logs\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.400862 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.409463 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-config-data\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.413546 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.416685 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsmmv\" (UniqueName: \"kubernetes.io/projected/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-kube-api-access-rsmmv\") pod \"nova-metadata-0\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.432372 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 14:43:20 crc kubenswrapper[5037]: I1126 14:43:20.882654 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.006464 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.010551 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-combined-ca-bundle\") pod \"74fa2905-fe61-4b29-a77f-b388199afd56\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.010647 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-config-data\") pod \"74fa2905-fe61-4b29-a77f-b388199afd56\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.010665 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-public-tls-certs\") pod \"74fa2905-fe61-4b29-a77f-b388199afd56\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.010732 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9r54g\" (UniqueName: \"kubernetes.io/projected/74fa2905-fe61-4b29-a77f-b388199afd56-kube-api-access-9r54g\") pod \"74fa2905-fe61-4b29-a77f-b388199afd56\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.010790 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-internal-tls-certs\") pod \"74fa2905-fe61-4b29-a77f-b388199afd56\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.010888 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74fa2905-fe61-4b29-a77f-b388199afd56-logs\") pod \"74fa2905-fe61-4b29-a77f-b388199afd56\" (UID: \"74fa2905-fe61-4b29-a77f-b388199afd56\") " Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.012629 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74fa2905-fe61-4b29-a77f-b388199afd56-logs" (OuterVolumeSpecName: "logs") pod "74fa2905-fe61-4b29-a77f-b388199afd56" (UID: "74fa2905-fe61-4b29-a77f-b388199afd56"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.017771 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74fa2905-fe61-4b29-a77f-b388199afd56-kube-api-access-9r54g" (OuterVolumeSpecName: "kube-api-access-9r54g") pod "74fa2905-fe61-4b29-a77f-b388199afd56" (UID: "74fa2905-fe61-4b29-a77f-b388199afd56"). InnerVolumeSpecName "kube-api-access-9r54g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.040038 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "74fa2905-fe61-4b29-a77f-b388199afd56" (UID: "74fa2905-fe61-4b29-a77f-b388199afd56"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.047692 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e","Type":"ContainerStarted","Data":"8ab7134009058e52030c6f00fcc6be72d410ef8854fda4b58df18f65f38fabcf"} Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.049697 5037 generic.go:334] "Generic (PLEG): container finished" podID="74fa2905-fe61-4b29-a77f-b388199afd56" containerID="254fed9a6c7832cd9c31c9fa91219866f069537254a0a25f6027823b0cde8c85" exitCode=0 Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.049757 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.049771 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"74fa2905-fe61-4b29-a77f-b388199afd56","Type":"ContainerDied","Data":"254fed9a6c7832cd9c31c9fa91219866f069537254a0a25f6027823b0cde8c85"} Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.049839 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"74fa2905-fe61-4b29-a77f-b388199afd56","Type":"ContainerDied","Data":"fc6f0dd8361442b18687f801cf4e77bcdec230add5e03ff56f295891c6713768"} Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.049858 5037 scope.go:117] "RemoveContainer" containerID="254fed9a6c7832cd9c31c9fa91219866f069537254a0a25f6027823b0cde8c85" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.057827 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-config-data" (OuterVolumeSpecName: "config-data") pod "74fa2905-fe61-4b29-a77f-b388199afd56" (UID: "74fa2905-fe61-4b29-a77f-b388199afd56"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.060996 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "74fa2905-fe61-4b29-a77f-b388199afd56" (UID: "74fa2905-fe61-4b29-a77f-b388199afd56"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.071694 5037 scope.go:117] "RemoveContainer" containerID="81412ce1a09f43f764475b0e6b8620173942a665fb9746d68f021aa83a2d3888" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.072130 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "74fa2905-fe61-4b29-a77f-b388199afd56" (UID: "74fa2905-fe61-4b29-a77f-b388199afd56"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.096686 5037 scope.go:117] "RemoveContainer" containerID="254fed9a6c7832cd9c31c9fa91219866f069537254a0a25f6027823b0cde8c85" Nov 26 14:43:21 crc kubenswrapper[5037]: E1126 14:43:21.097469 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"254fed9a6c7832cd9c31c9fa91219866f069537254a0a25f6027823b0cde8c85\": container with ID starting with 254fed9a6c7832cd9c31c9fa91219866f069537254a0a25f6027823b0cde8c85 not found: ID does not exist" containerID="254fed9a6c7832cd9c31c9fa91219866f069537254a0a25f6027823b0cde8c85" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.097511 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"254fed9a6c7832cd9c31c9fa91219866f069537254a0a25f6027823b0cde8c85"} err="failed to get container status \"254fed9a6c7832cd9c31c9fa91219866f069537254a0a25f6027823b0cde8c85\": rpc error: code = NotFound desc = could not find container \"254fed9a6c7832cd9c31c9fa91219866f069537254a0a25f6027823b0cde8c85\": container with ID starting with 254fed9a6c7832cd9c31c9fa91219866f069537254a0a25f6027823b0cde8c85 not found: ID does not exist" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.097538 5037 scope.go:117] "RemoveContainer" containerID="81412ce1a09f43f764475b0e6b8620173942a665fb9746d68f021aa83a2d3888" Nov 26 14:43:21 crc kubenswrapper[5037]: E1126 14:43:21.097910 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81412ce1a09f43f764475b0e6b8620173942a665fb9746d68f021aa83a2d3888\": container with ID starting with 81412ce1a09f43f764475b0e6b8620173942a665fb9746d68f021aa83a2d3888 not found: ID does not exist" containerID="81412ce1a09f43f764475b0e6b8620173942a665fb9746d68f021aa83a2d3888" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.097936 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81412ce1a09f43f764475b0e6b8620173942a665fb9746d68f021aa83a2d3888"} err="failed to get container status \"81412ce1a09f43f764475b0e6b8620173942a665fb9746d68f021aa83a2d3888\": rpc error: code = NotFound desc = could not find container \"81412ce1a09f43f764475b0e6b8620173942a665fb9746d68f021aa83a2d3888\": container with ID starting with 81412ce1a09f43f764475b0e6b8620173942a665fb9746d68f021aa83a2d3888 not found: ID does not exist" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.113234 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.113314 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.113335 5037 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.113350 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9r54g\" (UniqueName: 
\"kubernetes.io/projected/74fa2905-fe61-4b29-a77f-b388199afd56-kube-api-access-9r54g\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.113365 5037 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74fa2905-fe61-4b29-a77f-b388199afd56-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.113407 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74fa2905-fe61-4b29-a77f-b388199afd56-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.395530 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.406062 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.430804 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.456731 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 14:43:21 crc kubenswrapper[5037]: E1126 14:43:21.457390 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74fa2905-fe61-4b29-a77f-b388199afd56" containerName="nova-api-log" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.457509 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="74fa2905-fe61-4b29-a77f-b388199afd56" containerName="nova-api-log" Nov 26 14:43:21 crc kubenswrapper[5037]: E1126 14:43:21.457606 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74fa2905-fe61-4b29-a77f-b388199afd56" containerName="nova-api-api" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.457663 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="74fa2905-fe61-4b29-a77f-b388199afd56" containerName="nova-api-api" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.457947 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="74fa2905-fe61-4b29-a77f-b388199afd56" containerName="nova-api-log" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.458038 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="74fa2905-fe61-4b29-a77f-b388199afd56" containerName="nova-api-api" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.460227 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.462743 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.462987 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.462748 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.465105 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.622783 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kslw4\" (UniqueName: \"kubernetes.io/projected/cff988a9-69e2-42cc-a456-426f13be8a58-kube-api-access-kslw4\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.622825 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-public-tls-certs\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.622840 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-config-data\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.622886 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.623169 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cff988a9-69e2-42cc-a456-426f13be8a58-logs\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.623382 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.725240 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kslw4\" (UniqueName: \"kubernetes.io/projected/cff988a9-69e2-42cc-a456-426f13be8a58-kube-api-access-kslw4\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.725301 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-config-data\") pod 
\"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.725320 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-public-tls-certs\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.725366 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.725426 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cff988a9-69e2-42cc-a456-426f13be8a58-logs\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.725471 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.726547 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cff988a9-69e2-42cc-a456-426f13be8a58-logs\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.730724 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-public-tls-certs\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.730905 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-config-data\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.730940 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.741700 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.741860 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kslw4\" (UniqueName: \"kubernetes.io/projected/cff988a9-69e2-42cc-a456-426f13be8a58-kube-api-access-kslw4\") pod \"nova-api-0\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " 
pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.778708 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.927681 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74fa2905-fe61-4b29-a77f-b388199afd56" path="/var/lib/kubelet/pods/74fa2905-fe61-4b29-a77f-b388199afd56/volumes" Nov 26 14:43:21 crc kubenswrapper[5037]: I1126 14:43:21.929117 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="818cd85a-3db7-4a42-ac8e-5bf11c024493" path="/var/lib/kubelet/pods/818cd85a-3db7-4a42-ac8e-5bf11c024493/volumes" Nov 26 14:43:22 crc kubenswrapper[5037]: I1126 14:43:22.065494 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e","Type":"ContainerStarted","Data":"33dcd0b34b2f2fdf22fdb535aa2524ac7c392d11aebfe3891b1a520355c97e29"} Nov 26 14:43:22 crc kubenswrapper[5037]: I1126 14:43:22.065533 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e","Type":"ContainerStarted","Data":"d79abe361aef8985708638422b648d6c91d88cc2db1ffbf2d1c043eb4548ba88"} Nov 26 14:43:22 crc kubenswrapper[5037]: I1126 14:43:22.097368 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.097353839 podStartE2EDuration="2.097353839s" podCreationTimestamp="2025-11-26 14:43:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:43:22.094903849 +0000 UTC m=+1668.891674043" watchObservedRunningTime="2025-11-26 14:43:22.097353839 +0000 UTC m=+1668.894124023" Nov 26 14:43:22 crc kubenswrapper[5037]: I1126 14:43:22.264552 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:43:22 crc kubenswrapper[5037]: W1126 14:43:22.265868 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcff988a9_69e2_42cc_a456_426f13be8a58.slice/crio-4ea9727ed78454115502b78b044845297e0a6de7ba85ead9415a911f8fe74e90 WatchSource:0}: Error finding container 4ea9727ed78454115502b78b044845297e0a6de7ba85ead9415a911f8fe74e90: Status 404 returned error can't find the container with id 4ea9727ed78454115502b78b044845297e0a6de7ba85ead9415a911f8fe74e90 Nov 26 14:43:23 crc kubenswrapper[5037]: I1126 14:43:23.075957 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cff988a9-69e2-42cc-a456-426f13be8a58","Type":"ContainerStarted","Data":"cb5de0febf4f6869c6113a77abea3425966e60873437776dd7f265ea84cd9709"} Nov 26 14:43:23 crc kubenswrapper[5037]: I1126 14:43:23.076451 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cff988a9-69e2-42cc-a456-426f13be8a58","Type":"ContainerStarted","Data":"852040b491cc42295268755c4a7220816c3a15eb3bae51127b18b8351d773e4d"} Nov 26 14:43:23 crc kubenswrapper[5037]: I1126 14:43:23.076466 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cff988a9-69e2-42cc-a456-426f13be8a58","Type":"ContainerStarted","Data":"4ea9727ed78454115502b78b044845297e0a6de7ba85ead9415a911f8fe74e90"} Nov 26 14:43:23 crc kubenswrapper[5037]: I1126 14:43:23.108503 5037 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.108482958 podStartE2EDuration="2.108482958s" podCreationTimestamp="2025-11-26 14:43:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:43:23.096504677 +0000 UTC m=+1669.893274861" watchObservedRunningTime="2025-11-26 14:43:23.108482958 +0000 UTC m=+1669.905253142" Nov 26 14:43:25 crc kubenswrapper[5037]: I1126 14:43:25.432991 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 14:43:25 crc kubenswrapper[5037]: I1126 14:43:25.433480 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 14:43:26 crc kubenswrapper[5037]: I1126 14:43:26.395412 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 14:43:26 crc kubenswrapper[5037]: I1126 14:43:26.433062 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 14:43:27 crc kubenswrapper[5037]: I1126 14:43:27.192963 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 14:43:28 crc kubenswrapper[5037]: I1126 14:43:28.908786 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:43:28 crc kubenswrapper[5037]: E1126 14:43:28.909500 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:43:30 crc kubenswrapper[5037]: I1126 14:43:30.433172 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 14:43:30 crc kubenswrapper[5037]: I1126 14:43:30.433212 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 14:43:31 crc kubenswrapper[5037]: I1126 14:43:31.449492 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 14:43:31 crc kubenswrapper[5037]: I1126 14:43:31.449503 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 14:43:31 crc kubenswrapper[5037]: I1126 14:43:31.779657 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 14:43:31 crc kubenswrapper[5037]: I1126 14:43:31.779854 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 14:43:32 crc kubenswrapper[5037]: I1126 14:43:32.794532 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" 
podUID="cff988a9-69e2-42cc-a456-426f13be8a58" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.199:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 14:43:32 crc kubenswrapper[5037]: I1126 14:43:32.794594 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cff988a9-69e2-42cc-a456-426f13be8a58" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.199:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 14:43:33 crc kubenswrapper[5037]: I1126 14:43:33.436991 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 14:43:40 crc kubenswrapper[5037]: I1126 14:43:40.439532 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 14:43:40 crc kubenswrapper[5037]: I1126 14:43:40.441502 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 14:43:40 crc kubenswrapper[5037]: I1126 14:43:40.447339 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 14:43:40 crc kubenswrapper[5037]: I1126 14:43:40.452365 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 14:43:40 crc kubenswrapper[5037]: I1126 14:43:40.908069 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:43:40 crc kubenswrapper[5037]: E1126 14:43:40.908343 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:43:41 crc kubenswrapper[5037]: I1126 14:43:41.788980 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 14:43:41 crc kubenswrapper[5037]: I1126 14:43:41.789662 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 14:43:41 crc kubenswrapper[5037]: I1126 14:43:41.793491 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 14:43:41 crc kubenswrapper[5037]: I1126 14:43:41.796177 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 14:43:42 crc kubenswrapper[5037]: I1126 14:43:42.313692 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 14:43:42 crc kubenswrapper[5037]: I1126 14:43:42.322457 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 14:43:55 crc kubenswrapper[5037]: I1126 14:43:55.908758 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:43:55 crc kubenswrapper[5037]: E1126 14:43:55.909546 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.541248 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.546530 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="e7fcafe6-2e7b-4893-84bc-5a3be7029ef7" containerName="openstackclient" containerID="cri-o://963cb889a355aaf5eaa5d102c5937c4f8735b969d035ba5db0079e3607909577" gracePeriod=2 Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.583521 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.771207 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement259c-account-delete-rj8qs"] Nov 26 14:44:01 crc kubenswrapper[5037]: E1126 14:44:01.771629 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7fcafe6-2e7b-4893-84bc-5a3be7029ef7" containerName="openstackclient" Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.771646 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7fcafe6-2e7b-4893-84bc-5a3be7029ef7" containerName="openstackclient" Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.771817 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7fcafe6-2e7b-4893-84bc-5a3be7029ef7" containerName="openstackclient" Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.772444 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement259c-account-delete-rj8qs" Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.787348 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement259c-account-delete-rj8qs"] Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.851353 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.878078 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.878687 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" containerName="openstack-network-exporter" containerID="cri-o://88d82a9101d1e849e1da1553fa5b16c81210a121618ecb5e38def19cad7dc725" gracePeriod=300 Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.917587 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgrm7\" (UniqueName: \"kubernetes.io/projected/a7ece585-54a5-40d4-866f-98c968f03910-kube-api-access-jgrm7\") pod \"placement259c-account-delete-rj8qs\" (UID: \"a7ece585-54a5-40d4-866f-98c968f03910\") " pod="openstack/placement259c-account-delete-rj8qs" Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.917881 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7ece585-54a5-40d4-866f-98c968f03910-operator-scripts\") pod \"placement259c-account-delete-rj8qs\" (UID: \"a7ece585-54a5-40d4-866f-98c968f03910\") " pod="openstack/placement259c-account-delete-rj8qs" Nov 26 14:44:01 crc 
Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.947800 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbicanf9d3-account-delete-qgkj4"]
Nov 26 14:44:01 crc kubenswrapper[5037]: I1126 14:44:01.950555 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbicanf9d3-account-delete-qgkj4"
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.002507 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbicanf9d3-account-delete-qgkj4"]
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.005025 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" containerName="ovsdbserver-sb" containerID="cri-o://7755a538fc4266a4b6a0966882c3dd065bcfa6c8249a1b1f4b57bd1f36608b7a" gracePeriod=300
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.019553 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgrm7\" (UniqueName: \"kubernetes.io/projected/a7ece585-54a5-40d4-866f-98c968f03910-kube-api-access-jgrm7\") pod \"placement259c-account-delete-rj8qs\" (UID: \"a7ece585-54a5-40d4-866f-98c968f03910\") " pod="openstack/placement259c-account-delete-rj8qs"
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.019683 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7ece585-54a5-40d4-866f-98c968f03910-operator-scripts\") pod \"placement259c-account-delete-rj8qs\" (UID: \"a7ece585-54a5-40d4-866f-98c968f03910\") " pod="openstack/placement259c-account-delete-rj8qs"
Nov 26 14:44:02 crc kubenswrapper[5037]: E1126 14:44:02.023216 5037 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Nov 26 14:44:02 crc kubenswrapper[5037]: E1126 14:44:02.023580 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data podName:ba78b94a-32d0-4377-ac41-ffd036b241bf nodeName:}" failed. No retries permitted until 2025-11-26 14:44:02.523277921 +0000 UTC m=+1709.320048105 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data") pod "rabbitmq-server-0" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf") : configmap "rabbitmq-config-data" not found
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.024545 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7ece585-54a5-40d4-866f-98c968f03910-operator-scripts\") pod \"placement259c-account-delete-rj8qs\" (UID: \"a7ece585-54a5-40d4-866f-98c968f03910\") " pod="openstack/placement259c-account-delete-rj8qs"
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.062019 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgrm7\" (UniqueName: \"kubernetes.io/projected/a7ece585-54a5-40d4-866f-98c968f03910-kube-api-access-jgrm7\") pod \"placement259c-account-delete-rj8qs\" (UID: \"a7ece585-54a5-40d4-866f-98c968f03910\") " pod="openstack/placement259c-account-delete-rj8qs"
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.095410 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.097500 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement259c-account-delete-rj8qs"
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.101883 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron3cec-account-delete-42tll"]
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.102968 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron3cec-account-delete-42tll"
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.124141 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkmzj\" (UniqueName: \"kubernetes.io/projected/b3dc5e2c-0729-4f4d-8481-bd8fb0064a80-kube-api-access-zkmzj\") pod \"barbicanf9d3-account-delete-qgkj4\" (UID: \"b3dc5e2c-0729-4f4d-8481-bd8fb0064a80\") " pod="openstack/barbicanf9d3-account-delete-qgkj4"
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.124214 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b3dc5e2c-0729-4f4d-8481-bd8fb0064a80-operator-scripts\") pod \"barbicanf9d3-account-delete-qgkj4\" (UID: \"b3dc5e2c-0729-4f4d-8481-bd8fb0064a80\") " pod="openstack/barbicanf9d3-account-delete-qgkj4"
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.149866 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron3cec-account-delete-42tll"]
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.226080 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkmzj\" (UniqueName: \"kubernetes.io/projected/b3dc5e2c-0729-4f4d-8481-bd8fb0064a80-kube-api-access-zkmzj\") pod \"barbicanf9d3-account-delete-qgkj4\" (UID: \"b3dc5e2c-0729-4f4d-8481-bd8fb0064a80\") " pod="openstack/barbicanf9d3-account-delete-qgkj4"
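[Annotation] The MountVolume.SetUp failure for rabbitmq-server-0 above is retried on a doubling schedule: durationBeforeRetry is 500ms here, and the later retries of the same operation in this log show 1s and then 2s. A sketch of that cadence; the cap is an assumption, since kubelet's actual maximum is not visible in this log:

// mount_retry_backoff_sketch.go -- illustrative, not kubelet source.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond // first observed durationBeforeRetry
	const maxDelay = 2 * time.Minute // assumed cap
	for attempt := 1; attempt <= 5; attempt++ {
		// In the log, each failed attempt prints "No retries permitted
		// until <now+delay> (durationBeforeRetry <delay>)".
		fmt.Printf("attempt %d failed; durationBeforeRetry %s\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}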
\"b0797697-2b6d-4684-9fe1-e17a91f80369\") " pod="openstack/neutron3cec-account-delete-42tll" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.226175 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b3dc5e2c-0729-4f4d-8481-bd8fb0064a80-operator-scripts\") pod \"barbicanf9d3-account-delete-qgkj4\" (UID: \"b3dc5e2c-0729-4f4d-8481-bd8fb0064a80\") " pod="openstack/barbicanf9d3-account-delete-qgkj4" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.226376 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0797697-2b6d-4684-9fe1-e17a91f80369-operator-scripts\") pod \"neutron3cec-account-delete-42tll\" (UID: \"b0797697-2b6d-4684-9fe1-e17a91f80369\") " pod="openstack/neutron3cec-account-delete-42tll" Nov 26 14:44:02 crc kubenswrapper[5037]: E1126 14:44:02.227429 5037 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 14:44:02 crc kubenswrapper[5037]: E1126 14:44:02.227476 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data podName:7f05291f-1331-411b-9971-c71218d11a35 nodeName:}" failed. No retries permitted until 2025-11-26 14:44:02.727460617 +0000 UTC m=+1709.524230791 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data") pod "rabbitmq-cell1-server-0" (UID: "7f05291f-1331-411b-9971-c71218d11a35") : configmap "rabbitmq-cell1-config-data" not found Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.240418 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b3dc5e2c-0729-4f4d-8481-bd8fb0064a80-operator-scripts\") pod \"barbicanf9d3-account-delete-qgkj4\" (UID: \"b3dc5e2c-0729-4f4d-8481-bd8fb0064a80\") " pod="openstack/barbicanf9d3-account-delete-qgkj4" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.247460 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance4d25-account-delete-pftxq"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.258721 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkmzj\" (UniqueName: \"kubernetes.io/projected/b3dc5e2c-0729-4f4d-8481-bd8fb0064a80-kube-api-access-zkmzj\") pod \"barbicanf9d3-account-delete-qgkj4\" (UID: \"b3dc5e2c-0729-4f4d-8481-bd8fb0064a80\") " pod="openstack/barbicanf9d3-account-delete-qgkj4" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.271431 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance4d25-account-delete-pftxq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.284809 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance4d25-account-delete-pftxq"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.300170 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbicanf9d3-account-delete-qgkj4" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.303115 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-gpxkh"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.326675 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-gpxkh"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.327759 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0797697-2b6d-4684-9fe1-e17a91f80369-operator-scripts\") pod \"neutron3cec-account-delete-42tll\" (UID: \"b0797697-2b6d-4684-9fe1-e17a91f80369\") " pod="openstack/neutron3cec-account-delete-42tll" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.327800 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjdn6\" (UniqueName: \"kubernetes.io/projected/5b53df32-369f-4a91-bb97-5da067cc3c6a-kube-api-access-mjdn6\") pod \"glance4d25-account-delete-pftxq\" (UID: \"5b53df32-369f-4a91-bb97-5da067cc3c6a\") " pod="openstack/glance4d25-account-delete-pftxq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.327907 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7d82\" (UniqueName: \"kubernetes.io/projected/b0797697-2b6d-4684-9fe1-e17a91f80369-kube-api-access-s7d82\") pod \"neutron3cec-account-delete-42tll\" (UID: \"b0797697-2b6d-4684-9fe1-e17a91f80369\") " pod="openstack/neutron3cec-account-delete-42tll" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.327955 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b53df32-369f-4a91-bb97-5da067cc3c6a-operator-scripts\") pod \"glance4d25-account-delete-pftxq\" (UID: \"5b53df32-369f-4a91-bb97-5da067cc3c6a\") " pod="openstack/glance4d25-account-delete-pftxq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.342974 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0797697-2b6d-4684-9fe1-e17a91f80369-operator-scripts\") pod \"neutron3cec-account-delete-42tll\" (UID: \"b0797697-2b6d-4684-9fe1-e17a91f80369\") " pod="openstack/neutron3cec-account-delete-42tll" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.354196 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.354599 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="ec26620a-6ad8-4792-bb25-543dc31d3be5" containerName="ovn-northd" containerID="cri-o://d3158b8703e1c139eecff816090fc54bf7b1598ce59a6a91d56a6bde613e9529" gracePeriod=30 Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.355111 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="ec26620a-6ad8-4792-bb25-543dc31d3be5" containerName="openstack-network-exporter" containerID="cri-o://ea28ba554ccf3be563e142ef9810c318f1a7398137617c44deec729fa9ddf87d" gracePeriod=30 Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.377349 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-pmjr8"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.405142 5037 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-s7d82\" (UniqueName: \"kubernetes.io/projected/b0797697-2b6d-4684-9fe1-e17a91f80369-kube-api-access-s7d82\") pod \"neutron3cec-account-delete-42tll\" (UID: \"b0797697-2b6d-4684-9fe1-e17a91f80369\") " pod="openstack/neutron3cec-account-delete-42tll" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.405202 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-pmjr8"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.432082 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinderc68b-account-delete-rphsq"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.433351 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinderc68b-account-delete-rphsq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.438903 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjdn6\" (UniqueName: \"kubernetes.io/projected/5b53df32-369f-4a91-bb97-5da067cc3c6a-kube-api-access-mjdn6\") pod \"glance4d25-account-delete-pftxq\" (UID: \"5b53df32-369f-4a91-bb97-5da067cc3c6a\") " pod="openstack/glance4d25-account-delete-pftxq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.439029 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b53df32-369f-4a91-bb97-5da067cc3c6a-operator-scripts\") pod \"glance4d25-account-delete-pftxq\" (UID: \"5b53df32-369f-4a91-bb97-5da067cc3c6a\") " pod="openstack/glance4d25-account-delete-pftxq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.440241 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b53df32-369f-4a91-bb97-5da067cc3c6a-operator-scripts\") pod \"glance4d25-account-delete-pftxq\" (UID: \"5b53df32-369f-4a91-bb97-5da067cc3c6a\") " pod="openstack/glance4d25-account-delete-pftxq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.488464 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinderc68b-account-delete-rphsq"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.501050 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjdn6\" (UniqueName: \"kubernetes.io/projected/5b53df32-369f-4a91-bb97-5da067cc3c6a-kube-api-access-mjdn6\") pod \"glance4d25-account-delete-pftxq\" (UID: \"5b53df32-369f-4a91-bb97-5da067cc3c6a\") " pod="openstack/glance4d25-account-delete-pftxq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.548708 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jp2bg\" (UniqueName: \"kubernetes.io/projected/10cd5eda-54cc-4c0a-91ca-4f8217e5220e-kube-api-access-jp2bg\") pod \"cinderc68b-account-delete-rphsq\" (UID: \"10cd5eda-54cc-4c0a-91ca-4f8217e5220e\") " pod="openstack/cinderc68b-account-delete-rphsq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.548889 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10cd5eda-54cc-4c0a-91ca-4f8217e5220e-operator-scripts\") pod \"cinderc68b-account-delete-rphsq\" (UID: \"10cd5eda-54cc-4c0a-91ca-4f8217e5220e\") " pod="openstack/cinderc68b-account-delete-rphsq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.551194 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron3cec-account-delete-42tll" Nov 26 14:44:02 crc kubenswrapper[5037]: E1126 14:44:02.583603 5037 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 26 14:44:02 crc kubenswrapper[5037]: E1126 14:44:02.583693 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data podName:ba78b94a-32d0-4377-ac41-ffd036b241bf nodeName:}" failed. No retries permitted until 2025-11-26 14:44:03.583670448 +0000 UTC m=+1710.380440632 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data") pod "rabbitmq-server-0" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf") : configmap "rabbitmq-config-data" not found Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.658772 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance4d25-account-delete-pftxq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.659231 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-78lfm"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.687255 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10cd5eda-54cc-4c0a-91ca-4f8217e5220e-operator-scripts\") pod \"cinderc68b-account-delete-rphsq\" (UID: \"10cd5eda-54cc-4c0a-91ca-4f8217e5220e\") " pod="openstack/cinderc68b-account-delete-rphsq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.687524 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jp2bg\" (UniqueName: \"kubernetes.io/projected/10cd5eda-54cc-4c0a-91ca-4f8217e5220e-kube-api-access-jp2bg\") pod \"cinderc68b-account-delete-rphsq\" (UID: \"10cd5eda-54cc-4c0a-91ca-4f8217e5220e\") " pod="openstack/cinderc68b-account-delete-rphsq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.688580 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10cd5eda-54cc-4c0a-91ca-4f8217e5220e-operator-scripts\") pod \"cinderc68b-account-delete-rphsq\" (UID: \"10cd5eda-54cc-4c0a-91ca-4f8217e5220e\") " pod="openstack/cinderc68b-account-delete-rphsq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.703363 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-78lfm"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.718372 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48/ovsdbserver-sb/0.log" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.718439 5037 generic.go:334] "Generic (PLEG): container finished" podID="ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" containerID="88d82a9101d1e849e1da1553fa5b16c81210a121618ecb5e38def19cad7dc725" exitCode=2 Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.718463 5037 generic.go:334] "Generic (PLEG): container finished" podID="ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" containerID="7755a538fc4266a4b6a0966882c3dd065bcfa6c8249a1b1f4b57bd1f36608b7a" exitCode=143 Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.718494 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48","Type":"ContainerDied","Data":"88d82a9101d1e849e1da1553fa5b16c81210a121618ecb5e38def19cad7dc725"} Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.718534 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48","Type":"ContainerDied","Data":"7755a538fc4266a4b6a0966882c3dd065bcfa6c8249a1b1f4b57bd1f36608b7a"} Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.721348 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell12e29-account-delete-vd2zr"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.731924 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell12e29-account-delete-vd2zr" Nov 26 14:44:02 crc kubenswrapper[5037]: E1126 14:44:02.790218 5037 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 14:44:02 crc kubenswrapper[5037]: E1126 14:44:02.790306 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data podName:7f05291f-1331-411b-9971-c71218d11a35 nodeName:}" failed. No retries permitted until 2025-11-26 14:44:03.790268874 +0000 UTC m=+1710.587039058 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data") pod "rabbitmq-cell1-server-0" (UID: "7f05291f-1331-411b-9971-c71218d11a35") : configmap "rabbitmq-cell1-config-data" not found Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.803376 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell12e29-account-delete-vd2zr"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.832720 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ptz2q"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.842112 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jp2bg\" (UniqueName: \"kubernetes.io/projected/10cd5eda-54cc-4c0a-91ca-4f8217e5220e-kube-api-access-jp2bg\") pod \"cinderc68b-account-delete-rphsq\" (UID: \"10cd5eda-54cc-4c0a-91ca-4f8217e5220e\") " pod="openstack/cinderc68b-account-delete-rphsq" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.858635 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-264cs"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.888625 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-mgn9v"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.899383 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03ffa609-b428-4a0e-8ec1-5c205391cf7b-operator-scripts\") pod \"novacell12e29-account-delete-vd2zr\" (UID: \"03ffa609-b428-4a0e-8ec1-5c205391cf7b\") " pod="openstack/novacell12e29-account-delete-vd2zr" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.899425 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9vm6\" (UniqueName: \"kubernetes.io/projected/03ffa609-b428-4a0e-8ec1-5c205391cf7b-kube-api-access-v9vm6\") pod \"novacell12e29-account-delete-vd2zr\" (UID: \"03ffa609-b428-4a0e-8ec1-5c205391cf7b\") " pod="openstack/novacell12e29-account-delete-vd2zr" 
Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.942870 5037 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/cinder-scheduler-0" secret="" err="secret \"cinder-cinder-dockercfg-784g4\" not found" Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.963426 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell0e6d6-account-delete-j5w7q"] Nov 26 14:44:02 crc kubenswrapper[5037]: I1126 14:44:02.964789 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0e6d6-account-delete-j5w7q" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.000994 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-j7ksk"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.001241 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-j7ksk" podUID="20b489db-2066-4222-9131-99da1bd054e3" containerName="openstack-network-exporter" containerID="cri-o://40c2342f25d25bc9e86e10174589a9485aa62cf36b1645c8581d0f0406ccb2fc" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.003918 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9vm6\" (UniqueName: \"kubernetes.io/projected/03ffa609-b428-4a0e-8ec1-5c205391cf7b-kube-api-access-v9vm6\") pod \"novacell12e29-account-delete-vd2zr\" (UID: \"03ffa609-b428-4a0e-8ec1-5c205391cf7b\") " pod="openstack/novacell12e29-account-delete-vd2zr" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.006033 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03ffa609-b428-4a0e-8ec1-5c205391cf7b-operator-scripts\") pod \"novacell12e29-account-delete-vd2zr\" (UID: \"03ffa609-b428-4a0e-8ec1-5c205391cf7b\") " pod="openstack/novacell12e29-account-delete-vd2zr" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.006713 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03ffa609-b428-4a0e-8ec1-5c205391cf7b-operator-scripts\") pod \"novacell12e29-account-delete-vd2zr\" (UID: \"03ffa609-b428-4a0e-8ec1-5c205391cf7b\") " pod="openstack/novacell12e29-account-delete-vd2zr" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.074412 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-mgn9v"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.096990 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9vm6\" (UniqueName: \"kubernetes.io/projected/03ffa609-b428-4a0e-8ec1-5c205391cf7b-kube-api-access-v9vm6\") pod \"novacell12e29-account-delete-vd2zr\" (UID: \"03ffa609-b428-4a0e-8ec1-5c205391cf7b\") " pod="openstack/novacell12e29-account-delete-vd2zr" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.108006 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvbqc\" (UniqueName: \"kubernetes.io/projected/d4fd340f-f656-4ec3-aba1-a33eaa58aed0-kube-api-access-bvbqc\") pod \"novacell0e6d6-account-delete-j5w7q\" (UID: \"d4fd340f-f656-4ec3-aba1-a33eaa58aed0\") " pod="openstack/novacell0e6d6-account-delete-j5w7q" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.108194 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/d4fd340f-f656-4ec3-aba1-a33eaa58aed0-operator-scripts\") pod \"novacell0e6d6-account-delete-j5w7q\" (UID: \"d4fd340f-f656-4ec3-aba1-a33eaa58aed0\") " pod="openstack/novacell0e6d6-account-delete-j5w7q" Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.108653 5037 secret.go:188] Couldn't get secret openstack/cinder-config-data: secret "cinder-config-data" not found Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.108717 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:03.608699452 +0000 UTC m=+1710.405469626 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-config-data" not found Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.108923 5037 secret.go:188] Couldn't get secret openstack/cinder-scripts: secret "cinder-scripts" not found Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.108952 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:03.608945338 +0000 UTC m=+1710.405715522 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-scripts" not found Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.109860 5037 secret.go:188] Couldn't get secret openstack/cinder-scheduler-config-data: secret "cinder-scheduler-config-data" not found Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.109890 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:03.609880981 +0000 UTC m=+1710.406651165 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data-custom" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-scheduler-config-data" not found Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.133081 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinderc68b-account-delete-rphsq" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.133519 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-jxxp2"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.142730 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell12e29-account-delete-vd2zr" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.159222 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-jxxp2"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.215129 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0e6d6-account-delete-j5w7q"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.216162 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4fd340f-f656-4ec3-aba1-a33eaa58aed0-operator-scripts\") pod \"novacell0e6d6-account-delete-j5w7q\" (UID: \"d4fd340f-f656-4ec3-aba1-a33eaa58aed0\") " pod="openstack/novacell0e6d6-account-delete-j5w7q" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.223527 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvbqc\" (UniqueName: \"kubernetes.io/projected/d4fd340f-f656-4ec3-aba1-a33eaa58aed0-kube-api-access-bvbqc\") pod \"novacell0e6d6-account-delete-j5w7q\" (UID: \"d4fd340f-f656-4ec3-aba1-a33eaa58aed0\") " pod="openstack/novacell0e6d6-account-delete-j5w7q" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.223918 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4fd340f-f656-4ec3-aba1-a33eaa58aed0-operator-scripts\") pod \"novacell0e6d6-account-delete-j5w7q\" (UID: \"d4fd340f-f656-4ec3-aba1-a33eaa58aed0\") " pod="openstack/novacell0e6d6-account-delete-j5w7q" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.259381 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novaapieb2b-account-delete-988tl"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.266164 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapieb2b-account-delete-988tl" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.278226 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapieb2b-account-delete-988tl"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.289156 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvbqc\" (UniqueName: \"kubernetes.io/projected/d4fd340f-f656-4ec3-aba1-a33eaa58aed0-kube-api-access-bvbqc\") pod \"novacell0e6d6-account-delete-j5w7q\" (UID: \"d4fd340f-f656-4ec3-aba1-a33eaa58aed0\") " pod="openstack/novacell0e6d6-account-delete-j5w7q" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.295849 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-gz5lz"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.296079 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz" podUID="257e4b94-6b37-4243-8e8a-6bd47f0a5603" containerName="dnsmasq-dns" containerID="cri-o://084147140f433c529e6be96361e0c147011e55c6ffe26e746fd701df366832bd" gracePeriod=10 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.359308 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.360471 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" containerName="openstack-network-exporter" containerID="cri-o://15306a8687a9663850db67213877c281cc0db7db6eb704f63cd32810d22a787d" gracePeriod=300 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.375040 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell0e6d6-account-delete-j5w7q" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.413964 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-854dc8db7d-j5l6c"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.414182 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-854dc8db7d-j5l6c" podUID="c2d75a18-6446-4558-af57-c6e0c957fc3b" containerName="placement-log" containerID="cri-o://98e55e6e1008fca6ad27dbc8db97cf30687f5fe52197409ea9b8d138f9f80df2" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.416957 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-854dc8db7d-j5l6c" podUID="c2d75a18-6446-4558-af57-c6e0c957fc3b" containerName="placement-api" containerID="cri-o://ae38d038fad3bbc384e79c4d7f1e060c20c2d38b3e29519ec6a7891fc4ff742b" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.434188 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-7j79r"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.436430 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8df4197d-046b-4b35-a14a-b382bda46242-operator-scripts\") pod \"novaapieb2b-account-delete-988tl\" (UID: \"8df4197d-046b-4b35-a14a-b382bda46242\") " pod="openstack/novaapieb2b-account-delete-988tl" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.436509 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4bs4\" (UniqueName: \"kubernetes.io/projected/8df4197d-046b-4b35-a14a-b382bda46242-kube-api-access-w4bs4\") pod \"novaapieb2b-account-delete-988tl\" (UID: \"8df4197d-046b-4b35-a14a-b382bda46242\") " pod="openstack/novaapieb2b-account-delete-988tl" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.480872 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-7j79r"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.532938 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" containerName="ovsdbserver-nb" containerID="cri-o://a2145e917b1ed177f4eb5739c42f821fe6ce560720e71b9834ad76a33c523409" gracePeriod=300 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.533627 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7ccc6df59c-m5tjx"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.535941 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7ccc6df59c-m5tjx" podUID="a97b4f35-04a7-47c3-a658-170645023de6" containerName="neutron-api" containerID="cri-o://25004b7d7570b0227e943b0f10767fefe0da178777c48537fb23de224173d062" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.536734 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7ccc6df59c-m5tjx" podUID="a97b4f35-04a7-47c3-a658-170645023de6" containerName="neutron-httpd" containerID="cri-o://d787d7c57b49308ce496dd3022253165f26b5f2096403db68cdd6ea85914b8a9" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.555363 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/8df4197d-046b-4b35-a14a-b382bda46242-operator-scripts\") pod \"novaapieb2b-account-delete-988tl\" (UID: \"8df4197d-046b-4b35-a14a-b382bda46242\") " pod="openstack/novaapieb2b-account-delete-988tl" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.556342 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4bs4\" (UniqueName: \"kubernetes.io/projected/8df4197d-046b-4b35-a14a-b382bda46242-kube-api-access-w4bs4\") pod \"novaapieb2b-account-delete-988tl\" (UID: \"8df4197d-046b-4b35-a14a-b382bda46242\") " pod="openstack/novaapieb2b-account-delete-988tl" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.579145 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8df4197d-046b-4b35-a14a-b382bda46242-operator-scripts\") pod \"novaapieb2b-account-delete-988tl\" (UID: \"8df4197d-046b-4b35-a14a-b382bda46242\") " pod="openstack/novaapieb2b-account-delete-988tl" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.592655 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-5n698"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.600000 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4bs4\" (UniqueName: \"kubernetes.io/projected/8df4197d-046b-4b35-a14a-b382bda46242-kube-api-access-w4bs4\") pod \"novaapieb2b-account-delete-988tl\" (UID: \"8df4197d-046b-4b35-a14a-b382bda46242\") " pod="openstack/novaapieb2b-account-delete-988tl" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.693300 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-5n698"] Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.697561 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7755a538fc4266a4b6a0966882c3dd065bcfa6c8249a1b1f4b57bd1f36608b7a is running failed: container process not found" containerID="7755a538fc4266a4b6a0966882c3dd065bcfa6c8249a1b1f4b57bd1f36608b7a" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.699676 5037 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.699727 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data podName:ba78b94a-32d0-4377-ac41-ffd036b241bf nodeName:}" failed. No retries permitted until 2025-11-26 14:44:05.699713538 +0000 UTC m=+1712.496483722 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data") pod "rabbitmq-server-0" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf") : configmap "rabbitmq-config-data" not found Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.700017 5037 secret.go:188] Couldn't get secret openstack/cinder-config-data: secret "cinder-config-data" not found Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.700040 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. 
No retries permitted until 2025-11-26 14:44:04.700033326 +0000 UTC m=+1711.496803510 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-config-data" not found Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.700071 5037 secret.go:188] Couldn't get secret openstack/cinder-scripts: secret "cinder-scripts" not found Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.700087 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:04.700081977 +0000 UTC m=+1711.496852161 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-scripts" not found Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.700115 5037 secret.go:188] Couldn't get secret openstack/cinder-scheduler-config-data: secret "cinder-scheduler-config-data" not found Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.700132 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:04.700126818 +0000 UTC m=+1711.496897002 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data-custom" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-scheduler-config-data" not found Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.702268 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7755a538fc4266a4b6a0966882c3dd065bcfa6c8249a1b1f4b57bd1f36608b7a is running failed: container process not found" containerID="7755a538fc4266a4b6a0966882c3dd065bcfa6c8249a1b1f4b57bd1f36608b7a" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.703954 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7755a538fc4266a4b6a0966882c3dd065bcfa6c8249a1b1f4b57bd1f36608b7a is running failed: container process not found" containerID="7755a538fc4266a4b6a0966882c3dd065bcfa6c8249a1b1f4b57bd1f36608b7a" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.704076 5037 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7755a538fc4266a4b6a0966882c3dd065bcfa6c8249a1b1f4b57bd1f36608b7a is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-sb-0" podUID="ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" containerName="ovsdbserver-sb" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.746268 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48/ovsdbserver-sb/0.log" Nov 26 14:44:03 crc 
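[Annotation] The ExecSync errors above come from a readiness probe that runs pidof inside the ovsdbserver-sb container; once CRI-O has torn the container down there is no process to exec into, so the runtime returns NotFound and the prober logs "Probe errored" instead of an ordinary failure. A sketch of the probe definition implied by cmd=["/usr/bin/pidof","ovsdb-server"]; the period and timeout are assumptions:

// exec_probe_sketch.go -- assumed probe shape; requires k8s.io/api.
package main

import corev1 "k8s.io/api/core/v1"

func ovsdbReadinessProbe() *corev1.Probe {
	return &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			Exec: &corev1.ExecAction{
				// command taken verbatim from the log's cmd=[...] field
				Command: []string{"/usr/bin/pidof", "ovsdb-server"},
			},
		},
		PeriodSeconds:  10, // assumed
		TimeoutSeconds: 5,  // assumed
	}
}

func main() { _ = ovsdbReadinessProbe() }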
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.746350 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.749341 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48/ovsdbserver-sb/0.log"
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.749402 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48","Type":"ContainerDied","Data":"1ccbea2d11bd29a6293c0d48ba264de5bbf2e107c52b752d443ef961167fa4a8"}
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.749460 5037 scope.go:117] "RemoveContainer" containerID="88d82a9101d1e849e1da1553fa5b16c81210a121618ecb5e38def19cad7dc725"
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.759322 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-j7ksk_20b489db-2066-4222-9131-99da1bd054e3/openstack-network-exporter/0.log"
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.759360 5037 generic.go:334] "Generic (PLEG): container finished" podID="20b489db-2066-4222-9131-99da1bd054e3" containerID="40c2342f25d25bc9e86e10174589a9485aa62cf36b1645c8581d0f0406ccb2fc" exitCode=2
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.759411 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-j7ksk" event={"ID":"20b489db-2066-4222-9131-99da1bd054e3","Type":"ContainerDied","Data":"40c2342f25d25bc9e86e10174589a9485aa62cf36b1645c8581d0f0406ccb2fc"}
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.773713 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89/ovsdbserver-nb/0.log"
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.773761 5037 generic.go:334] "Generic (PLEG): container finished" podID="cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" containerID="15306a8687a9663850db67213877c281cc0db7db6eb704f63cd32810d22a787d" exitCode=2
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.773784 5037 generic.go:334] "Generic (PLEG): container finished" podID="cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" containerID="a2145e917b1ed177f4eb5739c42f821fe6ce560720e71b9834ad76a33c523409" exitCode=143
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.773834 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89","Type":"ContainerDied","Data":"15306a8687a9663850db67213877c281cc0db7db6eb704f63cd32810d22a787d"}
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.773865 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89","Type":"ContainerDied","Data":"a2145e917b1ed177f4eb5739c42f821fe6ce560720e71b9834ad76a33c523409"}
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.786873 5037 generic.go:334] "Generic (PLEG): container finished" podID="c2d75a18-6446-4558-af57-c6e0c957fc3b" containerID="98e55e6e1008fca6ad27dbc8db97cf30687f5fe52197409ea9b8d138f9f80df2" exitCode=143
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.786963 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-854dc8db7d-j5l6c" event={"ID":"c2d75a18-6446-4558-af57-c6e0c957fc3b","Type":"ContainerDied","Data":"98e55e6e1008fca6ad27dbc8db97cf30687f5fe52197409ea9b8d138f9f80df2"}
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.790437 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement259c-account-delete-rj8qs" event={"ID":"a7ece585-54a5-40d4-866f-98c968f03910","Type":"ContainerStarted","Data":"ea907abbcf61858031d578ef9e3277159eecf177979939e7289a8d1fb98b9156"}
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.800066 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapieb2b-account-delete-988tl"
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.800184 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-scripts\") pod \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") "
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.800246 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") "
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.800372 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsqhm\" (UniqueName: \"kubernetes.io/projected/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-kube-api-access-xsqhm\") pod \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") "
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.800434 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-config\") pod \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") "
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.800480 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-ovsdb-rundir\") pod \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") "
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.800521 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-metrics-certs-tls-certs\") pod \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") "
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.800552 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-combined-ca-bundle\") pod \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") "
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.800601 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-ovsdbserver-sb-tls-certs\") pod \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\" (UID: \"ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48\") "
Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.801054 5037 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Nov 26 14:44:03 crc kubenswrapper[5037]: E1126 14:44:03.801110 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data podName:7f05291f-1331-411b-9971-c71218d11a35 nodeName:}" failed. No retries permitted until 2025-11-26 14:44:05.801094304 +0000 UTC m=+1712.597864488 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data") pod "rabbitmq-cell1-server-0" (UID: "7f05291f-1331-411b-9971-c71218d11a35") : configmap "rabbitmq-cell1-config-data" not found
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.802468 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-scripts" (OuterVolumeSpecName: "scripts") pod "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" (UID: "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.803222 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-config" (OuterVolumeSpecName: "config") pod "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" (UID: "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.810516 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-kube-api-access-xsqhm" (OuterVolumeSpecName: "kube-api-access-xsqhm") pod "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" (UID: "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48"). InnerVolumeSpecName "kube-api-access-xsqhm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.819871 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" (UID: "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.823428 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" (UID: "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.838692 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-v8rzx"]
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.853850 5037 scope.go:117] "RemoveContainer" containerID="7755a538fc4266a4b6a0966882c3dd065bcfa6c8249a1b1f4b57bd1f36608b7a"
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.871822 5037 generic.go:334] "Generic (PLEG): container finished" podID="e7fcafe6-2e7b-4893-84bc-5a3be7029ef7" containerID="963cb889a355aaf5eaa5d102c5937c4f8735b969d035ba5db0079e3607909577" exitCode=137
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.873185 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-v8rzx"]
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.881189 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"]
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.882128 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-server" containerID="cri-o://6af1db545967ed1a4d63df5e069cefc5f2002414e3177a1c53b51f7542200023" gracePeriod=30
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.882670 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="swift-recon-cron" containerID="cri-o://c8f7e68bd6dcee155bb73bde0f7e251636a9f691fa99efe37da6d71e22470060" gracePeriod=30
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.882735 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="rsync" containerID="cri-o://73c060abeb7573649685e311227f2a579fdf95557d8415f02f112eb7df9fe387" gracePeriod=30
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.882771 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-expirer" containerID="cri-o://ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349" gracePeriod=30
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.882827 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-updater" containerID="cri-o://38af5291214696fc2ab5068031bf723126b6ea1a4502cbaf41fec1945bdddb71" gracePeriod=30
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.882874 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-auditor" containerID="cri-o://3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5" gracePeriod=30
Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.882935 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-replicator" containerID="cri-o://b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948" gracePeriod=30
pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-server" containerID="cri-o://1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.883017 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-updater" containerID="cri-o://dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.883085 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-auditor" containerID="cri-o://3269f937868b4639b15beb2313a77a6d697a8359d42a1eac21aab99aba4a3441" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.883123 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-replicator" containerID="cri-o://a67a55597dfa0413c7fcfba871c60e4ca78dcc1337e642fa8a730a82b2946f38" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.883168 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-server" containerID="cri-o://64e910dd424738dcc2a6a10dbfc0d43ed55b865d44976cf3ce77949fd94d142f" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.883197 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-reaper" containerID="cri-o://1830d485d70f2c4c16c972d8eb54d3d68060d42e9eb67b0f0be4a183511992c6" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.883241 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-auditor" containerID="cri-o://9ad134020857e5330738626c90a057bc32ca98d01d16f8a94a600086e2df114c" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.883270 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-replicator" containerID="cri-o://b54bd1523c4248a6b946bc2484b15b9a925819b903de19e564491a32a104536e" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.900540 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.900784 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" containerName="glance-log" containerID="cri-o://7193d230cf98d6ea21211158364885315519bd51ee3bfb69a0d77702bb8f27cf" gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.901215 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" containerName="glance-httpd" containerID="cri-o://38c16d94870f4c7e1e940ce264a006c31e02b0edcd3c7f37d2a4b79a7684dec0" 
gracePeriod=30 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.906956 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsqhm\" (UniqueName: \"kubernetes.io/projected/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-kube-api-access-xsqhm\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.906988 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.907000 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.907012 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.907135 5037 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.954476 5037 generic.go:334] "Generic (PLEG): container finished" podID="257e4b94-6b37-4243-8e8a-6bd47f0a5603" containerID="084147140f433c529e6be96361e0c147011e55c6ffe26e746fd701df366832bd" exitCode=0 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.988819 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2369115c-ae08-42b0-af64-c42191c04502" path="/var/lib/kubelet/pods/2369115c-ae08-42b0-af64-c42191c04502/volumes" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.989654 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35b26e94-ffdb-4ee2-9940-efa9d8fd74b8" path="/var/lib/kubelet/pods/35b26e94-ffdb-4ee2-9940-efa9d8fd74b8/volumes" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.990181 5037 generic.go:334] "Generic (PLEG): container finished" podID="ec26620a-6ad8-4792-bb25-543dc31d3be5" containerID="ea28ba554ccf3be563e142ef9810c318f1a7398137617c44deec729fa9ddf87d" exitCode=2 Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.990304 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50b1873a-43ee-426d-99f2-84e8267cb178" path="/var/lib/kubelet/pods/50b1873a-43ee-426d-99f2-84e8267cb178/volumes" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.991525 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5141f29f-7b8b-493c-9d73-398f66ea4ab1" path="/var/lib/kubelet/pods/5141f29f-7b8b-493c-9d73-398f66ea4ab1/volumes" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.992106 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78b7adc8-c410-4ccf-948a-0d968e60d8b7" path="/var/lib/kubelet/pods/78b7adc8-c410-4ccf-948a-0d968e60d8b7/volumes" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.993082 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e9b6916-0913-445b-8e5d-6a7f397dc9ba" path="/var/lib/kubelet/pods/8e9b6916-0913-445b-8e5d-6a7f397dc9ba/volumes" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.993641 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a48383c9-d4b8-4b6d-8809-3be689c45803" 
path="/var/lib/kubelet/pods/a48383c9-d4b8-4b6d-8809-3be689c45803/volumes" Nov 26 14:44:03 crc kubenswrapper[5037]: I1126 14:44:03.994721 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6069f8f-704d-4d3c-8007-0556c1e38b8d" path="/var/lib/kubelet/pods/a6069f8f-704d-4d3c-8007-0556c1e38b8d/volumes" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.017787 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" (UID: "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.112668 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.116767 5037 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.169588 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" (UID: "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.214404 5037 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.214441 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.231940 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" (UID: "ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.316267 5037 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:04 crc kubenswrapper[5037]: E1126 14:44:04.330510 5037 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 26 14:44:04 crc kubenswrapper[5037]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 26 14:44:04 crc kubenswrapper[5037]: + source /usr/local/bin/container-scripts/functions Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNBridge=br-int Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNRemote=tcp:localhost:6642 Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNEncapType=geneve Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNAvailabilityZones= Nov 26 14:44:04 crc kubenswrapper[5037]: ++ EnableChassisAsGateway=true Nov 26 14:44:04 crc kubenswrapper[5037]: ++ PhysicalNetworks= Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNHostName= Nov 26 14:44:04 crc kubenswrapper[5037]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 26 14:44:04 crc kubenswrapper[5037]: ++ ovs_dir=/var/lib/openvswitch Nov 26 14:44:04 crc kubenswrapper[5037]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 26 14:44:04 crc kubenswrapper[5037]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 26 14:44:04 crc kubenswrapper[5037]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 14:44:04 crc kubenswrapper[5037]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 14:44:04 crc kubenswrapper[5037]: + sleep 0.5 Nov 26 14:44:04 crc kubenswrapper[5037]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 14:44:04 crc kubenswrapper[5037]: + sleep 0.5 Nov 26 14:44:04 crc kubenswrapper[5037]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 14:44:04 crc kubenswrapper[5037]: + cleanup_ovsdb_server_semaphore Nov 26 14:44:04 crc kubenswrapper[5037]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 14:44:04 crc kubenswrapper[5037]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 26 14:44:04 crc kubenswrapper[5037]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-264cs" message=< Nov 26 14:44:04 crc kubenswrapper[5037]: Exiting ovsdb-server (5) [ OK ] Nov 26 14:44:04 crc kubenswrapper[5037]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 26 14:44:04 crc kubenswrapper[5037]: + source /usr/local/bin/container-scripts/functions Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNBridge=br-int Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNRemote=tcp:localhost:6642 Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNEncapType=geneve Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNAvailabilityZones= Nov 26 14:44:04 crc kubenswrapper[5037]: ++ EnableChassisAsGateway=true Nov 26 14:44:04 crc kubenswrapper[5037]: ++ PhysicalNetworks= Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNHostName= Nov 26 14:44:04 crc kubenswrapper[5037]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 26 14:44:04 crc kubenswrapper[5037]: ++ ovs_dir=/var/lib/openvswitch Nov 26 14:44:04 crc kubenswrapper[5037]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 26 14:44:04 crc kubenswrapper[5037]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 26 14:44:04 crc kubenswrapper[5037]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 14:44:04 crc kubenswrapper[5037]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 14:44:04 crc kubenswrapper[5037]: + sleep 0.5 Nov 26 14:44:04 crc kubenswrapper[5037]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 14:44:04 crc kubenswrapper[5037]: + sleep 0.5 Nov 26 14:44:04 crc kubenswrapper[5037]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 14:44:04 crc kubenswrapper[5037]: + cleanup_ovsdb_server_semaphore Nov 26 14:44:04 crc kubenswrapper[5037]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 14:44:04 crc kubenswrapper[5037]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 26 14:44:04 crc kubenswrapper[5037]: > Nov 26 14:44:04 crc kubenswrapper[5037]: E1126 14:44:04.330554 5037 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 26 14:44:04 crc kubenswrapper[5037]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 26 14:44:04 crc kubenswrapper[5037]: + source /usr/local/bin/container-scripts/functions Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNBridge=br-int Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNRemote=tcp:localhost:6642 Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNEncapType=geneve Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNAvailabilityZones= Nov 26 14:44:04 crc kubenswrapper[5037]: ++ EnableChassisAsGateway=true Nov 26 14:44:04 crc kubenswrapper[5037]: ++ PhysicalNetworks= Nov 26 14:44:04 crc kubenswrapper[5037]: ++ OVNHostName= Nov 26 14:44:04 crc kubenswrapper[5037]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 26 14:44:04 crc kubenswrapper[5037]: ++ ovs_dir=/var/lib/openvswitch Nov 26 14:44:04 crc kubenswrapper[5037]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 26 14:44:04 crc kubenswrapper[5037]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 26 14:44:04 crc kubenswrapper[5037]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 14:44:04 crc kubenswrapper[5037]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 14:44:04 crc kubenswrapper[5037]: + sleep 0.5 Nov 26 14:44:04 crc kubenswrapper[5037]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 14:44:04 crc kubenswrapper[5037]: + sleep 0.5 Nov 26 14:44:04 crc kubenswrapper[5037]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 14:44:04 crc kubenswrapper[5037]: + cleanup_ovsdb_server_semaphore Nov 26 14:44:04 crc kubenswrapper[5037]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 14:44:04 crc kubenswrapper[5037]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 26 14:44:04 crc kubenswrapper[5037]: > pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovsdb-server" containerID="cri-o://1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.330590 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovsdb-server" containerID="cri-o://1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" gracePeriod=29 Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.381365 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovs-vswitchd" containerID="cri-o://dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" gracePeriod=29 Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465351 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz" event={"ID":"257e4b94-6b37-4243-8e8a-6bd47f0a5603","Type":"ContainerDied","Data":"084147140f433c529e6be96361e0c147011e55c6ffe26e746fd701df366832bd"} Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465393 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement259c-account-delete-rj8qs"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465408 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465426 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465438 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ec26620a-6ad8-4792-bb25-543dc31d3be5","Type":"ContainerDied","Data":"ea28ba554ccf3be563e142ef9810c318f1a7398137617c44deec729fa9ddf87d"} Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465450 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465461 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465471 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-7f55999cfc-jx9r6"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465484 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465497 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-7c767587b5-nzlv9"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465583 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-7ddc4956b6-dfqsv"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465691 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7978b45fdd-7t6zc"] Nov 26 14:44:04 crc 
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465715 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465725 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465738 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465748 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-j65gt"]
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465759 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-j65gt"]
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465771 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-2e29-account-create-update-lrvct"]
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465779 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell12e29-account-delete-vd2zr"]
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465790 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-2e29-account-create-update-lrvct"]
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.465800 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.468102 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7978b45fdd-7t6zc" podUID="334f3bb7-793e-4cff-b0ef-de24dc8a46b5" containerName="barbican-api-log" containerID="cri-o://b1db0ccf747c065689c039923c194e8419b6cd5a8c76ec974b99511a7ede0d79" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.468164 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="9c3c49ff-cf53-4b5b-ba83-10877d499763" containerName="nova-scheduler-scheduler" containerID="cri-o://b37dad75bebb2e8ff92fd84a2c83e4b7a2ff235be32f88191ebf7baf5089d611" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.468192 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7978b45fdd-7t6zc" podUID="334f3bb7-793e-4cff-b0ef-de24dc8a46b5" containerName="barbican-api" containerID="cri-o://2f6e30bd74ea66c491e2959c075dfac83aa041657baf37104388b43c5d325007" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.468327 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="4408c030-a5ac-49ae-9361-54cbe3c27108" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://4f57619ebc65ee19c82e274478cdb8f19dd8e02a6b90642fbf2271294bdfb236" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.468537 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-7f55999cfc-jx9r6" podUID="aed636f4-272c-4379-a6f3-8247ae0e46cc" containerName="proxy-httpd" containerID="cri-o://ac124cedaed73284f9bbf36277718f97b7c752f739fd563062cca9f2857a6274" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.468635 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="fe17b260-d105-4274-88d1-d85fd9948f9f" containerName="cinder-scheduler" containerID="cri-o://a141207a0fff58064f3407d6c288ff7903f292bd3e192081eb2a010bd7fcf95d" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.468719 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-7f55999cfc-jx9r6" podUID="aed636f4-272c-4379-a6f3-8247ae0e46cc" containerName="proxy-server" containerID="cri-o://2019faaa0d00a4dce9f4ce3484825a1f6132bcb7d194dfe843a8fd57678e6f7d" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.468820 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="07720f90-b6f7-4b81-9c32-17f1e72b19fa" containerName="cinder-api-log" containerID="cri-o://1ccf73ea43e62a2d000418194aef023e26ee721280485b1329df2b411c630259" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.468868 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="fe17b260-d105-4274-88d1-d85fd9948f9f" containerName="probe" containerID="cri-o://140e7be2182c285f86914d1d0349ab0f880704f06b09bd28f8522e6957b1e06c" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.468941 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-7c767587b5-nzlv9" podUID="6d49cc40-ce20-415f-a979-398430c2bd81" containerName="barbican-worker-log" containerID="cri-o://88558c083c5cd020dbbbc7911d8c1ff0846d988d99c33563252e02c9bde2f0cf" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.468981 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="07720f90-b6f7-4b81-9c32-17f1e72b19fa" containerName="cinder-api" containerID="cri-o://6c728b7a4bd6db17ff62032233cd9d220168f2c76bace60a7590b7b669f9d433" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.469061 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" podUID="19ae84d4-26f8-4e11-bd01-da880def5547" containerName="barbican-keystone-listener-log" containerID="cri-o://355cc9901e399458175cd4640ef40324803629a86ea9a4d2abc2824da07c4f8d" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.469097 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-7c767587b5-nzlv9" podUID="6d49cc40-ce20-415f-a979-398430c2bd81" containerName="barbican-worker" containerID="cri-o://08aa4f4dbe17185b559c1307060da7ba09ed7694916c81cee021536293b3f886" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.469180 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cff988a9-69e2-42cc-a456-426f13be8a58" containerName="nova-api-log" containerID="cri-o://852040b491cc42295268755c4a7220816c3a15eb3bae51127b18b8351d773e4d" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.469201 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" podUID="19ae84d4-26f8-4e11-bd01-da880def5547" containerName="barbican-keystone-listener" containerID="cri-o://73ec95358a687154b2f7af7ab67ff687aabcbeb867fdaa97bcb29864cc40d8c1" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.469268 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerName="nova-metadata-log" containerID="cri-o://d79abe361aef8985708638422b648d6c91d88cc2db1ffbf2d1c043eb4548ba88" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.469683 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="693d1a99-bf33-42ee-adea-2f8ce0f6c002" containerName="glance-log" containerID="cri-o://b49e79776c4c1720c6692646b9ec69e22007400cd025b41070ff9a874d805f29" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.469762 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="693d1a99-bf33-42ee-adea-2f8ce0f6c002" containerName="glance-httpd" containerID="cri-o://69f2b0b56cf2f3be40f2a859173b22b913b09b1aec2185348206ae5ef68d4747" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.469842 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerName="nova-metadata-metadata" containerID="cri-o://33dcd0b34b2f2fdf22fdb535aa2524ac7c392d11aebfe3891b1a520355c97e29" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.469906 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cff988a9-69e2-42cc-a456-426f13be8a58" containerName="nova-api-api" containerID="cri-o://cb5de0febf4f6869c6113a77abea3425966e60873437776dd7f265ea84cd9709" gracePeriod=30
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.537665 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-j7ksk_20b489db-2066-4222-9131-99da1bd054e3/openstack-network-exporter/0.log"
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.537774 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-j7ksk"
Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.635904 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz"
Need to start a new one" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.651466 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/20b489db-2066-4222-9131-99da1bd054e3-ovn-rundir\") pod \"20b489db-2066-4222-9131-99da1bd054e3\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.651593 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bg26k\" (UniqueName: \"kubernetes.io/projected/20b489db-2066-4222-9131-99da1bd054e3-kube-api-access-bg26k\") pod \"20b489db-2066-4222-9131-99da1bd054e3\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.651627 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b489db-2066-4222-9131-99da1bd054e3-config\") pod \"20b489db-2066-4222-9131-99da1bd054e3\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.651686 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/20b489db-2066-4222-9131-99da1bd054e3-metrics-certs-tls-certs\") pod \"20b489db-2066-4222-9131-99da1bd054e3\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.651723 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20b489db-2066-4222-9131-99da1bd054e3-combined-ca-bundle\") pod \"20b489db-2066-4222-9131-99da1bd054e3\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.651820 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/20b489db-2066-4222-9131-99da1bd054e3-ovs-rundir\") pod \"20b489db-2066-4222-9131-99da1bd054e3\" (UID: \"20b489db-2066-4222-9131-99da1bd054e3\") " Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.653981 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20b489db-2066-4222-9131-99da1bd054e3-config" (OuterVolumeSpecName: "config") pod "20b489db-2066-4222-9131-99da1bd054e3" (UID: "20b489db-2066-4222-9131-99da1bd054e3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.654066 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/20b489db-2066-4222-9131-99da1bd054e3-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "20b489db-2066-4222-9131-99da1bd054e3" (UID: "20b489db-2066-4222-9131-99da1bd054e3"). InnerVolumeSpecName "ovn-rundir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.655079 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5kqgs"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.657489 5037 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/20b489db-2066-4222-9131-99da1bd054e3-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.657515 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20b489db-2066-4222-9131-99da1bd054e3-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.666031 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/20b489db-2066-4222-9131-99da1bd054e3-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "20b489db-2066-4222-9131-99da1bd054e3" (UID: "20b489db-2066-4222-9131-99da1bd054e3"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.667596 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b489db-2066-4222-9131-99da1bd054e3-kube-api-access-bg26k" (OuterVolumeSpecName: "kube-api-access-bg26k") pod "20b489db-2066-4222-9131-99da1bd054e3" (UID: "20b489db-2066-4222-9131-99da1bd054e3"). InnerVolumeSpecName "kube-api-access-bg26k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.728895 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5kqgs"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.735350 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="7f05291f-1331-411b-9971-c71218d11a35" containerName="rabbitmq" containerID="cri-o://4483535c43e875eaf8b876f0ce67748ccef6e9a8c9dba169d9e8c3b8043014ae" gracePeriod=604800 Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.735470 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="ba78b94a-32d0-4377-ac41-ffd036b241bf" containerName="rabbitmq" containerID="cri-o://74f68fdb96d374b8d9906137608e1412d3d306ce1e1daedf2234bd65a15de9cc" gracePeriod=604800 Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.735516 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.735670 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="8707a232-f648-4795-b250-d29069f26514" containerName="nova-cell0-conductor-conductor" containerID="cri-o://b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2" gracePeriod=30 Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.756122 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pthqz"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.757517 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89/ovsdbserver-nb/0.log" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.757582 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.758603 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-dns-swift-storage-0\") pod \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.758711 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-ovsdbserver-sb\") pod \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.758840 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-dns-svc\") pod \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.758911 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gsj2d\" (UniqueName: \"kubernetes.io/projected/257e4b94-6b37-4243-8e8a-6bd47f0a5603-kube-api-access-gsj2d\") pod \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.758954 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-config\") pod \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.758976 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-ovsdbserver-nb\") pod \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\" (UID: \"257e4b94-6b37-4243-8e8a-6bd47f0a5603\") " Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.761800 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bg26k\" (UniqueName: \"kubernetes.io/projected/20b489db-2066-4222-9131-99da1bd054e3-kube-api-access-bg26k\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.761826 5037 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/20b489db-2066-4222-9131-99da1bd054e3-ovs-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:04 crc kubenswrapper[5037]: E1126 14:44:04.761910 5037 secret.go:188] Couldn't get secret openstack/cinder-scripts: secret "cinder-scripts" not found Nov 26 14:44:04 crc kubenswrapper[5037]: E1126 14:44:04.761953 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:06.761939504 +0000 UTC m=+1713.558709688 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-scripts" not found Nov 26 14:44:04 crc kubenswrapper[5037]: E1126 14:44:04.762281 5037 secret.go:188] Couldn't get secret openstack/cinder-scheduler-config-data: secret "cinder-scheduler-config-data" not found Nov 26 14:44:04 crc kubenswrapper[5037]: E1126 14:44:04.762325 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:06.762318103 +0000 UTC m=+1713.559088287 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data-custom" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-scheduler-config-data" not found Nov 26 14:44:04 crc kubenswrapper[5037]: E1126 14:44:04.762359 5037 secret.go:188] Couldn't get secret openstack/cinder-config-data: secret "cinder-config-data" not found Nov 26 14:44:04 crc kubenswrapper[5037]: E1126 14:44:04.762379 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:06.762373354 +0000 UTC m=+1713.559143538 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-config-data" not found Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.771469 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pthqz"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.787934 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.788171 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="dd47ce65-1426-47e2-a5d1-6efd83bac3ab" containerName="nova-cell1-conductor-conductor" containerID="cri-o://cafd25254996ab2af2a3389cdf0cdcd2a0d515e80e24d59e43ba4b1e34bf696b" gracePeriod=30 Nov 26 14:44:04 crc kubenswrapper[5037]: I1126 14:44:04.793127 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/257e4b94-6b37-4243-8e8a-6bd47f0a5603-kube-api-access-gsj2d" (OuterVolumeSpecName: "kube-api-access-gsj2d") pod "257e4b94-6b37-4243-8e8a-6bd47f0a5603" (UID: "257e4b94-6b37-4243-8e8a-6bd47f0a5603"). InnerVolumeSpecName "kube-api-access-gsj2d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.822919 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:04.839514 5037 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10886f85_c800_4999_8c79_c490c60696cc.slice/crio-1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda97b4f35_04a7_47c3_a658_170645023de6.slice/crio-d787d7c57b49308ce496dd3022253165f26b5f2096403db68cdd6ea85914b8a9.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10886f85_c800_4999_8c79_c490c60696cc.slice/crio-b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcff988a9_69e2_42cc_a456_426f13be8a58.slice/crio-conmon-852040b491cc42295268755c4a7220816c3a15eb3bae51127b18b8351d773e4d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19ae84d4_26f8_4e11_bd01_da880def5547.slice/crio-conmon-355cc9901e399458175cd4640ef40324803629a86ea9a4d2abc2824da07c4f8d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10886f85_c800_4999_8c79_c490c60696cc.slice/crio-conmon-ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10886f85_c800_4999_8c79_c490c60696cc.slice/crio-conmon-dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10886f85_c800_4999_8c79_c490c60696cc.slice/crio-conmon-3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5.scope\": RecentStats: unable to find data in memory cache]" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.863898 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-combined-ca-bundle\") pod \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.863951 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-ovsdb-rundir\") pod \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.864047 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-config\") pod \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.866938 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" (UID: 
"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.867599 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-config" (OuterVolumeSpecName: "config") pod "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" (UID: "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.867663 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-openstack-config\") pod \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.868247 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2z7vp\" (UniqueName: \"kubernetes.io/projected/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-kube-api-access-2z7vp\") pod \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.868310 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-metrics-certs-tls-certs\") pod \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.868335 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-ovsdbserver-nb-tls-certs\") pod \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.868361 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bsk4\" (UniqueName: \"kubernetes.io/projected/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-kube-api-access-6bsk4\") pod \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.868404 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.868421 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-scripts\") pod \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.868462 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-combined-ca-bundle\") pod \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\" (UID: \"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89\") " Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.868490 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-openstack-config-secret\") pod \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\" (UID: \"e7fcafe6-2e7b-4893-84bc-5a3be7029ef7\") " Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.869135 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gsj2d\" (UniqueName: \"kubernetes.io/projected/257e4b94-6b37-4243-8e8a-6bd47f0a5603-kube-api-access-gsj2d\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.869146 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.869155 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.870313 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-scripts" (OuterVolumeSpecName: "scripts") pod "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" (UID: "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.897489 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" (UID: "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.898388 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-kube-api-access-2z7vp" (OuterVolumeSpecName: "kube-api-access-2z7vp") pod "e7fcafe6-2e7b-4893-84bc-5a3be7029ef7" (UID: "e7fcafe6-2e7b-4893-84bc-5a3be7029ef7"). InnerVolumeSpecName "kube-api-access-2z7vp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.902737 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-kube-api-access-6bsk4" (OuterVolumeSpecName: "kube-api-access-6bsk4") pod "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" (UID: "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89"). InnerVolumeSpecName "kube-api-access-6bsk4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.970847 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2z7vp\" (UniqueName: \"kubernetes.io/projected/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-kube-api-access-2z7vp\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.971144 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bsk4\" (UniqueName: \"kubernetes.io/projected/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-kube-api-access-6bsk4\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.971163 5037 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.971172 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:04.994585 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "257e4b94-6b37-4243-8e8a-6bd47f0a5603" (UID: "257e4b94-6b37-4243-8e8a-6bd47f0a5603"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.014741 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="300dce8f-4337-4707-8075-f32b93f03e4f" containerName="galera" containerID="cri-o://b153954d737c10034799ce4a540d151fbb5420d7fc983a9615870845e76fb0be" gracePeriod=30 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.022909 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b489db-2066-4222-9131-99da1bd054e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "20b489db-2066-4222-9131-99da1bd054e3" (UID: "20b489db-2066-4222-9131-99da1bd054e3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.024172 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "257e4b94-6b37-4243-8e8a-6bd47f0a5603" (UID: "257e4b94-6b37-4243-8e8a-6bd47f0a5603"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.025574 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-j7ksk_20b489db-2066-4222-9131-99da1bd054e3/openstack-network-exporter/0.log" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.025672 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-j7ksk" event={"ID":"20b489db-2066-4222-9131-99da1bd054e3","Type":"ContainerDied","Data":"74e3e529f3af67fb4aaec99f6c27730587b3ddc393d2f7e77e75f62a7c4ebfa6"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.025703 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-j7ksk" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.025706 5037 scope.go:117] "RemoveContainer" containerID="40c2342f25d25bc9e86e10174589a9485aa62cf36b1645c8581d0f0406ccb2fc" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.025907 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron3cec-account-delete-42tll"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.029050 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.051821 5037 generic.go:334] "Generic (PLEG): container finished" podID="693d1a99-bf33-42ee-adea-2f8ce0f6c002" containerID="b49e79776c4c1720c6692646b9ec69e22007400cd025b41070ff9a874d805f29" exitCode=143 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.051891 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"693d1a99-bf33-42ee-adea-2f8ce0f6c002","Type":"ContainerDied","Data":"b49e79776c4c1720c6692646b9ec69e22007400cd025b41070ff9a874d805f29"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.056944 5037 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.063414 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance4d25-account-delete-pftxq"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.074039 5037 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.074061 5037 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.074073 5037 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.074081 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20b489db-2066-4222-9131-99da1bd054e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.074059 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7978b45fdd-7t6zc" event={"ID":"334f3bb7-793e-4cff-b0ef-de24dc8a46b5","Type":"ContainerDied","Data":"b1db0ccf747c065689c039923c194e8419b6cd5a8c76ec974b99511a7ede0d79"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.074036 5037 generic.go:334] "Generic (PLEG): container finished" podID="334f3bb7-793e-4cff-b0ef-de24dc8a46b5" containerID="b1db0ccf747c065689c039923c194e8419b6cd5a8c76ec974b99511a7ede0d79" exitCode=143 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.076973 5037 generic.go:334] "Generic (PLEG): container finished" podID="cff988a9-69e2-42cc-a456-426f13be8a58" containerID="852040b491cc42295268755c4a7220816c3a15eb3bae51127b18b8351d773e4d" exitCode=143 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.077042 5037 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/nova-api-0" event={"ID":"cff988a9-69e2-42cc-a456-426f13be8a58","Type":"ContainerDied","Data":"852040b491cc42295268755c4a7220816c3a15eb3bae51127b18b8351d773e4d"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.081943 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbicanf9d3-account-delete-qgkj4"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094166 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="73c060abeb7573649685e311227f2a579fdf95557d8415f02f112eb7df9fe387" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094215 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094223 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="38af5291214696fc2ab5068031bf723126b6ea1a4502cbaf41fec1945bdddb71" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094231 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094238 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094246 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094253 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094259 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="3269f937868b4639b15beb2313a77a6d697a8359d42a1eac21aab99aba4a3441" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094265 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="a67a55597dfa0413c7fcfba871c60e4ca78dcc1337e642fa8a730a82b2946f38" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094305 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="64e910dd424738dcc2a6a10dbfc0d43ed55b865d44976cf3ce77949fd94d142f" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094315 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="1830d485d70f2c4c16c972d8eb54d3d68060d42e9eb67b0f0be4a183511992c6" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094321 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="9ad134020857e5330738626c90a057bc32ca98d01d16f8a94a600086e2df114c" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094328 5037 generic.go:334] "Generic (PLEG): container finished" 
podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="b54bd1523c4248a6b946bc2484b15b9a925819b903de19e564491a32a104536e" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094334 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="6af1db545967ed1a4d63df5e069cefc5f2002414e3177a1c53b51f7542200023" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094419 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"73c060abeb7573649685e311227f2a579fdf95557d8415f02f112eb7df9fe387"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094470 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094484 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"38af5291214696fc2ab5068031bf723126b6ea1a4502cbaf41fec1945bdddb71"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094494 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094503 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094513 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094542 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094554 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"3269f937868b4639b15beb2313a77a6d697a8359d42a1eac21aab99aba4a3441"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094562 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"a67a55597dfa0413c7fcfba871c60e4ca78dcc1337e642fa8a730a82b2946f38"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094571 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"64e910dd424738dcc2a6a10dbfc0d43ed55b865d44976cf3ce77949fd94d142f"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094622 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"1830d485d70f2c4c16c972d8eb54d3d68060d42e9eb67b0f0be4a183511992c6"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094631 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"9ad134020857e5330738626c90a057bc32ca98d01d16f8a94a600086e2df114c"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094640 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"b54bd1523c4248a6b946bc2484b15b9a925819b903de19e564491a32a104536e"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.094649 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"6af1db545967ed1a4d63df5e069cefc5f2002414e3177a1c53b51f7542200023"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.102801 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89/ovsdbserver-nb/0.log" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.103060 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89","Type":"ContainerDied","Data":"7ea225b3f72f1c383a501114dc02fb4b2abc7c21b00bacf892761e49e8546da1"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.103133 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.106645 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e7fcafe6-2e7b-4893-84bc-5a3be7029ef7" (UID: "e7fcafe6-2e7b-4893-84bc-5a3be7029ef7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.122001 5037 generic.go:334] "Generic (PLEG): container finished" podID="a97b4f35-04a7-47c3-a658-170645023de6" containerID="d787d7c57b49308ce496dd3022253165f26b5f2096403db68cdd6ea85914b8a9" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.122067 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ccc6df59c-m5tjx" event={"ID":"a97b4f35-04a7-47c3-a658-170645023de6","Type":"ContainerDied","Data":"d787d7c57b49308ce496dd3022253165f26b5f2096403db68cdd6ea85914b8a9"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.152464 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b489db-2066-4222-9131-99da1bd054e3-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "20b489db-2066-4222-9131-99da1bd054e3" (UID: "20b489db-2066-4222-9131-99da1bd054e3"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.183077 5037 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/20b489db-2066-4222-9131-99da1bd054e3-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.183101 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.204617 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" (UID: "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.247350 5037 generic.go:334] "Generic (PLEG): container finished" podID="2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" containerID="7193d230cf98d6ea21211158364885315519bd51ee3bfb69a0d77702bb8f27cf" exitCode=143 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.247415 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e","Type":"ContainerDied","Data":"7193d230cf98d6ea21211158364885315519bd51ee3bfb69a0d77702bb8f27cf"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.252126 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapieb2b-account-delete-988tl"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.263219 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "257e4b94-6b37-4243-8e8a-6bd47f0a5603" (UID: "257e4b94-6b37-4243-8e8a-6bd47f0a5603"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.281659 5037 generic.go:334] "Generic (PLEG): container finished" podID="aed636f4-272c-4379-a6f3-8247ae0e46cc" containerID="2019faaa0d00a4dce9f4ce3484825a1f6132bcb7d194dfe843a8fd57678e6f7d" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.281721 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f55999cfc-jx9r6" event={"ID":"aed636f4-272c-4379-a6f3-8247ae0e46cc","Type":"ContainerDied","Data":"2019faaa0d00a4dce9f4ce3484825a1f6132bcb7d194dfe843a8fd57678e6f7d"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.287573 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.287601 5037 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.305405 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinderc68b-account-delete-rphsq"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.318728 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "e7fcafe6-2e7b-4893-84bc-5a3be7029ef7" (UID: "e7fcafe6-2e7b-4893-84bc-5a3be7029ef7"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.328518 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz" event={"ID":"257e4b94-6b37-4243-8e8a-6bd47f0a5603","Type":"ContainerDied","Data":"74fe444d85526afbafbf495f9e22b89bbb20992f9499e071b44abb627b0e2476"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.328823 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55bfb77665-gz5lz" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.337635 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell12e29-account-delete-vd2zr"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.344476 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" (UID: "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.365794 5037 generic.go:334] "Generic (PLEG): container finished" podID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.365870 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-264cs" event={"ID":"80ce8a9a-aa28-40e4-ac35-c7d379224208","Type":"ContainerDied","Data":"1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.369133 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0e6d6-account-delete-j5w7q"] Nov 26 14:44:06 crc kubenswrapper[5037]: W1126 14:44:05.381487 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod03ffa609_b428_4a0e_8ec1_5c205391cf7b.slice/crio-07efdc2b707f4665d7954a7295427640db0b0cc66e0a067b1b9c787a54fd5add WatchSource:0}: Error finding container 07efdc2b707f4665d7954a7295427640db0b0cc66e0a067b1b9c787a54fd5add: Status 404 returned error can't find the container with id 07efdc2b707f4665d7954a7295427640db0b0cc66e0a067b1b9c787a54fd5add Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.384490 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-config" (OuterVolumeSpecName: "config") pod "257e4b94-6b37-4243-8e8a-6bd47f0a5603" (UID: "257e4b94-6b37-4243-8e8a-6bd47f0a5603"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.388987 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.389011 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.389023 5037 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.393593 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.393827 5037 generic.go:334] "Generic (PLEG): container finished" podID="6d49cc40-ce20-415f-a979-398430c2bd81" containerID="88558c083c5cd020dbbbc7911d8c1ff0846d988d99c33563252e02c9bde2f0cf" exitCode=143 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.393933 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c767587b5-nzlv9" event={"ID":"6d49cc40-ce20-415f-a979-398430c2bd81","Type":"ContainerDied","Data":"88558c083c5cd020dbbbc7911d8c1ff0846d988d99c33563252e02c9bde2f0cf"} Nov 26 14:44:06 crc kubenswrapper[5037]: 
E1126 14:44:05.397648 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.399204 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.399240 5037 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="8707a232-f648-4795-b250-d29069f26514" containerName="nova-cell0-conductor-conductor" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.408575 5037 generic.go:334] "Generic (PLEG): container finished" podID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerID="d79abe361aef8985708638422b648d6c91d88cc2db1ffbf2d1c043eb4548ba88" exitCode=143 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.408640 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e","Type":"ContainerDied","Data":"d79abe361aef8985708638422b648d6c91d88cc2db1ffbf2d1c043eb4548ba88"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.429138 5037 generic.go:334] "Generic (PLEG): container finished" podID="19ae84d4-26f8-4e11-bd01-da880def5547" containerID="355cc9901e399458175cd4640ef40324803629a86ea9a4d2abc2824da07c4f8d" exitCode=143 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.429215 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" event={"ID":"19ae84d4-26f8-4e11-bd01-da880def5547","Type":"ContainerDied","Data":"355cc9901e399458175cd4640ef40324803629a86ea9a4d2abc2824da07c4f8d"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.442570 5037 generic.go:334] "Generic (PLEG): container finished" podID="07720f90-b6f7-4b81-9c32-17f1e72b19fa" containerID="1ccf73ea43e62a2d000418194aef023e26ee721280485b1329df2b411c630259" exitCode=143 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.442844 5037 scope.go:117] "RemoveContainer" containerID="15306a8687a9663850db67213877c281cc0db7db6eb704f63cd32810d22a787d" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.442924 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"07720f90-b6f7-4b81-9c32-17f1e72b19fa","Type":"ContainerDied","Data":"1ccf73ea43e62a2d000418194aef023e26ee721280485b1329df2b411c630259"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.446688 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.452327 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.459024 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.480090 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-j7ksk"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.503754 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-j7ksk"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.510339 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement259c-account-delete-rj8qs" podStartSLOduration=4.510323973 podStartE2EDuration="4.510323973s" podCreationTimestamp="2025-11-26 14:44:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:44:05.464903414 +0000 UTC m=+1712.261673598" watchObservedRunningTime="2025-11-26 14:44:05.510323973 +0000 UTC m=+1712.307094157" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.543742 5037 scope.go:117] "RemoveContainer" containerID="a2145e917b1ed177f4eb5739c42f821fe6ce560720e71b9834ad76a33c523409" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.556631 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "e7fcafe6-2e7b-4893-84bc-5a3be7029ef7" (UID: "e7fcafe6-2e7b-4893-84bc-5a3be7029ef7"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.592871 5037 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.595427 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" (UID: "cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.621026 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "257e4b94-6b37-4243-8e8a-6bd47f0a5603" (UID: "257e4b94-6b37-4243-8e8a-6bd47f0a5603"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.668033 5037 scope.go:117] "RemoveContainer" containerID="084147140f433c529e6be96361e0c147011e55c6ffe26e746fd701df366832bd" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.694725 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/257e4b94-6b37-4243-8e8a-6bd47f0a5603-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.694744 5037 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.750692 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-gz5lz"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.774248 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-gz5lz"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.774402 5037 scope.go:117] "RemoveContainer" containerID="600eca6ef64f5ec22ada4dfc68aa6316b18796bc98f34fd7f889faa193bbcf49" Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.798311 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.799243 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.800395 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.800717 5037 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovsdb-server" Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.802658 5037 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.802737 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data 
podName:ba78b94a-32d0-4377-ac41-ffd036b241bf nodeName:}" failed. No retries permitted until 2025-11-26 14:44:09.802719195 +0000 UTC m=+1716.599489379 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data") pod "rabbitmq-server-0" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf") : configmap "rabbitmq-config-data" not found Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.815575 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.834960 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.838095 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.838257 5037 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovs-vswitchd" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.841498 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.862231 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.871512 5037 scope.go:117] "RemoveContainer" containerID="963cb889a355aaf5eaa5d102c5937c4f8735b969d035ba5db0079e3607909577" Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.907700 5037 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:05.907764 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data podName:7f05291f-1331-411b-9971-c71218d11a35 nodeName:}" failed. No retries permitted until 2025-11-26 14:44:09.9077471 +0000 UTC m=+1716.704517284 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data") pod "rabbitmq-cell1-server-0" (UID: "7f05291f-1331-411b-9971-c71218d11a35") : configmap "rabbitmq-cell1-config-data" not found Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.930844 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="013f650e-1eaf-4a38-b62d-5e9efbf8b6b5" path="/var/lib/kubelet/pods/013f650e-1eaf-4a38-b62d-5e9efbf8b6b5/volumes" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.932992 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b489db-2066-4222-9131-99da1bd054e3" path="/var/lib/kubelet/pods/20b489db-2066-4222-9131-99da1bd054e3/volumes" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.934007 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="257e4b94-6b37-4243-8e8a-6bd47f0a5603" path="/var/lib/kubelet/pods/257e4b94-6b37-4243-8e8a-6bd47f0a5603/volumes" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.938484 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42522a9f-0861-47fb-9d66-65039590aeaf" path="/var/lib/kubelet/pods/42522a9f-0861-47fb-9d66-65039590aeaf/volumes" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.939280 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56e600a6-1625-4844-9a51-da79b454cd34" path="/var/lib/kubelet/pods/56e600a6-1625-4844-9a51-da79b454cd34/volumes" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.940131 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" path="/var/lib/kubelet/pods/ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48/volumes" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.941577 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79" path="/var/lib/kubelet/pods/b5f1c1b7-8cc8-4be5-893b-83d6ee0e5a79/volumes" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.942740 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" path="/var/lib/kubelet/pods/cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89/volumes" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:05.943638 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7fcafe6-2e7b-4893-84bc-5a3be7029ef7" path="/var/lib/kubelet/pods/e7fcafe6-2e7b-4893-84bc-5a3be7029ef7/volumes" Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.283501 5037 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 26 14:44:06 crc kubenswrapper[5037]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-26T14:44:04Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 26 14:44:06 crc kubenswrapper[5037]: /etc/init.d/functions: line 589: 414 Alarm clock "$@" Nov 26 14:44:06 crc kubenswrapper[5037]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-ptz2q" message=< Nov 26 14:44:06 crc kubenswrapper[5037]: Exiting ovn-controller (1) [FAILED] Nov 26 14:44:06 crc kubenswrapper[5037]: Killing ovn-controller (1) [ OK ] Nov 26 14:44:06 crc kubenswrapper[5037]: 2025-11-26T14:44:04Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 26 14:44:06 crc kubenswrapper[5037]: /etc/init.d/functions: line 589: 414 Alarm clock "$@" Nov 26 14:44:06 crc 
kubenswrapper[5037]: > Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.283820 5037 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 26 14:44:06 crc kubenswrapper[5037]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-26T14:44:04Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 26 14:44:06 crc kubenswrapper[5037]: /etc/init.d/functions: line 589: 414 Alarm clock "$@" Nov 26 14:44:06 crc kubenswrapper[5037]: > pod="openstack/ovn-controller-ptz2q" podUID="6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" containerName="ovn-controller" containerID="cri-o://4f3ea9d9853eb70966721e3e3e7a15223cf43f7532f45c09e3855990aac57118" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.283867 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ptz2q" podUID="6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" containerName="ovn-controller" containerID="cri-o://4f3ea9d9853eb70966721e3e3e7a15223cf43f7532f45c09e3855990aac57118" gracePeriod=27 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.286445 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-ptz2q" podUID="6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" containerName="ovn-controller" probeResult="failure" output="" Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.310086 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d3158b8703e1c139eecff816090fc54bf7b1598ce59a6a91d56a6bde613e9529" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.313594 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d3158b8703e1c139eecff816090fc54bf7b1598ce59a6a91d56a6bde613e9529" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.317019 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d3158b8703e1c139eecff816090fc54bf7b1598ce59a6a91d56a6bde613e9529" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.317185 5037 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="ec26620a-6ad8-4792-bb25-543dc31d3be5" containerName="ovn-northd" Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.399692 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b37dad75bebb2e8ff92fd84a2c83e4b7a2ff235be32f88191ebf7baf5089d611 is running failed: container process not found" containerID="b37dad75bebb2e8ff92fd84a2c83e4b7a2ff235be32f88191ebf7baf5089d611" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.400203 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
b37dad75bebb2e8ff92fd84a2c83e4b7a2ff235be32f88191ebf7baf5089d611 is running failed: container process not found" containerID="b37dad75bebb2e8ff92fd84a2c83e4b7a2ff235be32f88191ebf7baf5089d611" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.400636 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b37dad75bebb2e8ff92fd84a2c83e4b7a2ff235be32f88191ebf7baf5089d611 is running failed: container process not found" containerID="b37dad75bebb2e8ff92fd84a2c83e4b7a2ff235be32f88191ebf7baf5089d611" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.400678 5037 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b37dad75bebb2e8ff92fd84a2c83e4b7a2ff235be32f88191ebf7baf5089d611 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="9c3c49ff-cf53-4b5b-ba83-10877d499763" containerName="nova-scheduler-scheduler" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.463436 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.465039 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="ceilometer-central-agent" containerID="cri-o://d9e7e3cabf68f8c77fe540ee66229c4f639270ea37015bbb512cd6402e09b909" gracePeriod=30 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.465555 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="proxy-httpd" containerID="cri-o://4b66f0aa9cd359a08c2fc701fbd668ab4e74119711476d65846830c7024d146e" gracePeriod=30 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.465618 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="sg-core" containerID="cri-o://e755b8c60d9bc3fba924bee940809c862f89fc6885ca06dec0c4232e6e6116ba" gracePeriod=30 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.465671 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="ceilometer-notification-agent" containerID="cri-o://71d63cfc921e9e46b869f583ba1be1fd0b73e384b0a9b8c0e83735a75f13ecda" gracePeriod=30 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.506746 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance4d25-account-delete-pftxq" event={"ID":"5b53df32-369f-4a91-bb97-5da067cc3c6a","Type":"ContainerStarted","Data":"79410aa1ae42d0ccbb1f24403b971fbdf2b68b70ddace0cddb3256c93023bbb4"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.506791 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance4d25-account-delete-pftxq" event={"ID":"5b53df32-369f-4a91-bb97-5da067cc3c6a","Type":"ContainerStarted","Data":"f03e59690ac79242d781f2c6ec08d1f34665025a0bc60a8ed94b3b2272e3ab97"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.525893 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 
14:44:06.526127 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="fd171888-b656-4511-af7d-cdff1058bf5f" containerName="kube-state-metrics" containerID="cri-o://9f6f522c179ad5b2c8f6d172e255b1812ec80e1f19a7f152457199761310283e" gracePeriod=30 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.543031 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0e6d6-account-delete-j5w7q" event={"ID":"d4fd340f-f656-4ec3-aba1-a33eaa58aed0","Type":"ContainerStarted","Data":"3dcbf6703b46c5a8172c41249335b346fb201a3ba3a13f8712f5740aef9fb594"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.543068 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0e6d6-account-delete-j5w7q" event={"ID":"d4fd340f-f656-4ec3-aba1-a33eaa58aed0","Type":"ContainerStarted","Data":"86e6ff4d67702c5fe009cee75f287a92fccea94f97a8f860d2119eb000575ab6"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.557304 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance4d25-account-delete-pftxq" podStartSLOduration=4.5572695450000005 podStartE2EDuration="4.557269545s" podCreationTimestamp="2025-11-26 14:44:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:44:06.548264374 +0000 UTC m=+1713.345034558" watchObservedRunningTime="2025-11-26 14:44:06.557269545 +0000 UTC m=+1713.354039729" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.562639 5037 generic.go:334] "Generic (PLEG): container finished" podID="b0797697-2b6d-4684-9fe1-e17a91f80369" containerID="3343fff4b72744dd55e8f3db651a2d5ac305f3aa671a086e0c10622269957929" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.562734 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron3cec-account-delete-42tll" event={"ID":"b0797697-2b6d-4684-9fe1-e17a91f80369","Type":"ContainerDied","Data":"3343fff4b72744dd55e8f3db651a2d5ac305f3aa671a086e0c10622269957929"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.562767 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron3cec-account-delete-42tll" event={"ID":"b0797697-2b6d-4684-9fe1-e17a91f80369","Type":"ContainerStarted","Data":"81f4258351ce40c54496f1c4d7aaa812e753dfd8441f5317cddb0ce60ce82d98"} Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.583850 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cafd25254996ab2af2a3389cdf0cdcd2a0d515e80e24d59e43ba4b1e34bf696b is running failed: container process not found" containerID="cafd25254996ab2af2a3389cdf0cdcd2a0d515e80e24d59e43ba4b1e34bf696b" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.587813 5037 generic.go:334] "Generic (PLEG): container finished" podID="aed636f4-272c-4379-a6f3-8247ae0e46cc" containerID="ac124cedaed73284f9bbf36277718f97b7c752f739fd563062cca9f2857a6274" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.587879 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f55999cfc-jx9r6" event={"ID":"aed636f4-272c-4379-a6f3-8247ae0e46cc","Type":"ContainerDied","Data":"ac124cedaed73284f9bbf36277718f97b7c752f739fd563062cca9f2857a6274"} Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.587924 5037 log.go:32] "ExecSync cmd from 
runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cafd25254996ab2af2a3389cdf0cdcd2a0d515e80e24d59e43ba4b1e34bf696b is running failed: container process not found" containerID="cafd25254996ab2af2a3389cdf0cdcd2a0d515e80e24d59e43ba4b1e34bf696b" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.591430 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cafd25254996ab2af2a3389cdf0cdcd2a0d515e80e24d59e43ba4b1e34bf696b is running failed: container process not found" containerID="cafd25254996ab2af2a3389cdf0cdcd2a0d515e80e24d59e43ba4b1e34bf696b" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.591493 5037 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cafd25254996ab2af2a3389cdf0cdcd2a0d515e80e24d59e43ba4b1e34bf696b is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="dd47ce65-1426-47e2-a5d1-6efd83bac3ab" containerName="nova-cell1-conductor-conductor" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.594655 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novacell0e6d6-account-delete-j5w7q" podStartSLOduration=4.594642448 podStartE2EDuration="4.594642448s" podCreationTimestamp="2025-11-26 14:44:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 14:44:06.574537446 +0000 UTC m=+1713.371307640" watchObservedRunningTime="2025-11-26 14:44:06.594642448 +0000 UTC m=+1713.391412632" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.598391 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ptz2q_6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf/ovn-controller/0.log" Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.598429 5037 generic.go:334] "Generic (PLEG): container finished" podID="6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" containerID="4f3ea9d9853eb70966721e3e3e7a15223cf43f7532f45c09e3855990aac57118" exitCode=143 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.598476 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ptz2q" event={"ID":"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf","Type":"ContainerDied","Data":"4f3ea9d9853eb70966721e3e3e7a15223cf43f7532f45c09e3855990aac57118"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.609168 5037 generic.go:334] "Generic (PLEG): container finished" podID="03ffa609-b428-4a0e-8ec1-5c205391cf7b" containerID="771c3c79a09cab53a7b40d533ff448b485921c9b6c6b640a07a8ddd8e73a21e8" exitCode=1 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.609258 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell12e29-account-delete-vd2zr" event={"ID":"03ffa609-b428-4a0e-8ec1-5c205391cf7b","Type":"ContainerDied","Data":"771c3c79a09cab53a7b40d533ff448b485921c9b6c6b640a07a8ddd8e73a21e8"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.609316 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell12e29-account-delete-vd2zr" event={"ID":"03ffa609-b428-4a0e-8ec1-5c205391cf7b","Type":"ContainerStarted","Data":"07efdc2b707f4665d7954a7295427640db0b0cc66e0a067b1b9c787a54fd5add"} Nov 26 14:44:06 crc 
kubenswrapper[5037]: I1126 14:44:06.631368 5037 generic.go:334] "Generic (PLEG): container finished" podID="dd47ce65-1426-47e2-a5d1-6efd83bac3ab" containerID="cafd25254996ab2af2a3389cdf0cdcd2a0d515e80e24d59e43ba4b1e34bf696b" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.631469 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"dd47ce65-1426-47e2-a5d1-6efd83bac3ab","Type":"ContainerDied","Data":"cafd25254996ab2af2a3389cdf0cdcd2a0d515e80e24d59e43ba4b1e34bf696b"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.647837 5037 generic.go:334] "Generic (PLEG): container finished" podID="8df4197d-046b-4b35-a14a-b382bda46242" containerID="a374ebe6c9d1ff0211c156c2166fd3bbda729a88ddf0dea44c5645e9f9f331e9" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.647938 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapieb2b-account-delete-988tl" event={"ID":"8df4197d-046b-4b35-a14a-b382bda46242","Type":"ContainerDied","Data":"a374ebe6c9d1ff0211c156c2166fd3bbda729a88ddf0dea44c5645e9f9f331e9"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.647966 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapieb2b-account-delete-988tl" event={"ID":"8df4197d-046b-4b35-a14a-b382bda46242","Type":"ContainerStarted","Data":"d72a3e29fa0941334f039a0f517223c2c8372f404345b575bc7e1b78606d6be3"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.651394 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.654709 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="bdd4849b-e92e-473d-88d0-74c060c04eb7" containerName="memcached" containerID="cri-o://ac800c71f24567e467410fb7c333d7691707d518b6f0f84492f89244a18f9205" gracePeriod=30 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.676988 5037 generic.go:334] "Generic (PLEG): container finished" podID="9c3c49ff-cf53-4b5b-ba83-10877d499763" containerID="b37dad75bebb2e8ff92fd84a2c83e4b7a2ff235be32f88191ebf7baf5089d611" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.677056 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9c3c49ff-cf53-4b5b-ba83-10877d499763","Type":"ContainerDied","Data":"b37dad75bebb2e8ff92fd84a2c83e4b7a2ff235be32f88191ebf7baf5089d611"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.768678 5037 generic.go:334] "Generic (PLEG): container finished" podID="b3dc5e2c-0729-4f4d-8481-bd8fb0064a80" containerID="0cbfde75083001a5dadd341f77eb28cdbbc9bc953e4de5164a5c584470961385" exitCode=0 Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.768806 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbicanf9d3-account-delete-qgkj4" event={"ID":"b3dc5e2c-0729-4f4d-8481-bd8fb0064a80","Type":"ContainerDied","Data":"0cbfde75083001a5dadd341f77eb28cdbbc9bc953e4de5164a5c584470961385"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.768836 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbicanf9d3-account-delete-qgkj4" event={"ID":"b3dc5e2c-0729-4f4d-8481-bd8fb0064a80","Type":"ContainerStarted","Data":"f00b77b3c11461b44434170237716153a3fb1bc043b240f76ba063db66fd2cfd"} Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.805414 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-dkcpw"] Nov 26 14:44:06 crc 
kubenswrapper[5037]: I1126 14:44:06.836217 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-dkcpw"]
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.838818 5037 generic.go:334] "Generic (PLEG): container finished" podID="a7ece585-54a5-40d4-866f-98c968f03910" containerID="5edb9e133e8d96c8435dd93ddc03dd31254084b4ce585ca69545dc07a5d56468" exitCode=0
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.838878 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement259c-account-delete-rj8qs" event={"ID":"a7ece585-54a5-40d4-866f-98c968f03910","Type":"ContainerDied","Data":"5edb9e133e8d96c8435dd93ddc03dd31254084b4ce585ca69545dc07a5d56468"}
Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.840941 5037 secret.go:188] Couldn't get secret openstack/cinder-scheduler-config-data: secret "cinder-scheduler-config-data" not found
Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.840987 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:10.840975934 +0000 UTC m=+1717.637746118 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data-custom" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-scheduler-config-data" not found
Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.841264 5037 secret.go:188] Couldn't get secret openstack/cinder-config-data: secret "cinder-config-data" not found
Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.841302 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:10.841294902 +0000 UTC m=+1717.638065076 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-config-data" not found
Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.841337 5037 secret.go:188] Couldn't get secret openstack/cinder-scripts: secret "cinder-scripts" not found
Nov 26 14:44:06 crc kubenswrapper[5037]: E1126 14:44:06.841355 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:10.841349164 +0000 UTC m=+1717.638119338 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-scripts" not found
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.844988 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-959f6"]
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.849749 5037 generic.go:334] "Generic (PLEG): container finished" podID="fe17b260-d105-4274-88d1-d85fd9948f9f" containerID="140e7be2182c285f86914d1d0349ab0f880704f06b09bd28f8522e6957b1e06c" exitCode=0
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.849819 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fe17b260-d105-4274-88d1-d85fd9948f9f","Type":"ContainerDied","Data":"140e7be2182c285f86914d1d0349ab0f880704f06b09bd28f8522e6957b1e06c"}
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.857346 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-959f6"]
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.863797 5037 generic.go:334] "Generic (PLEG): container finished" podID="4408c030-a5ac-49ae-9361-54cbe3c27108" containerID="4f57619ebc65ee19c82e274478cdb8f19dd8e02a6b90642fbf2271294bdfb236" exitCode=0
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.863864 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"4408c030-a5ac-49ae-9361-54cbe3c27108","Type":"ContainerDied","Data":"4f57619ebc65ee19c82e274478cdb8f19dd8e02a6b90642fbf2271294bdfb236"}
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.870031 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7f55999cfc-jx9r6"
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.872535 5037 generic.go:334] "Generic (PLEG): container finished" podID="10cd5eda-54cc-4c0a-91ca-4f8217e5220e" containerID="d1739e90ae050e6e31c02bc769bc3e3f53dba3dae117e4ebfd3699fc77b04e33" exitCode=0
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.872612 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinderc68b-account-delete-rphsq" event={"ID":"10cd5eda-54cc-4c0a-91ca-4f8217e5220e","Type":"ContainerDied","Data":"d1739e90ae050e6e31c02bc769bc3e3f53dba3dae117e4ebfd3699fc77b04e33"}
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.872660 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinderc68b-account-delete-rphsq" event={"ID":"10cd5eda-54cc-4c0a-91ca-4f8217e5220e","Type":"ContainerStarted","Data":"999353903a6393e18d00372d1f68498c83ff83a66141a099c6f2985c61b7ce94"}
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.872931 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.876082 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-fb548d49-hf8zh"]
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.876377 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-fb548d49-hf8zh" podUID="fe13f626-50c7-4ec3-b967-20f038731571" containerName="keystone-api" containerID="cri-o://ca5593d895153686d827f3a444c0ce51200735ce910a9e9d65ec173d66664c8b" gracePeriod=30
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.879127 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.885376 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"]
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.901778 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-bvnrk"]
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.908799 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-bvnrk"]
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.941669 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/aed636f4-272c-4379-a6f3-8247ae0e46cc-etc-swift\") pod \"aed636f4-272c-4379-a6f3-8247ae0e46cc\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.941703 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-config-data\") pod \"aed636f4-272c-4379-a6f3-8247ae0e46cc\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.941732 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-combined-ca-bundle\") pod \"4408c030-a5ac-49ae-9361-54cbe3c27108\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.941782 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-config-data\") pod \"4408c030-a5ac-49ae-9361-54cbe3c27108\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.941811 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-internal-tls-certs\") pod \"aed636f4-272c-4379-a6f3-8247ae0e46cc\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.941839 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-nova-novncproxy-tls-certs\") pod \"4408c030-a5ac-49ae-9361-54cbe3c27108\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.941876 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-vencrypt-tls-certs\") pod \"4408c030-a5ac-49ae-9361-54cbe3c27108\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.941904 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c3c49ff-cf53-4b5b-ba83-10877d499763-combined-ca-bundle\") pod \"9c3c49ff-cf53-4b5b-ba83-10877d499763\" (UID: \"9c3c49ff-cf53-4b5b-ba83-10877d499763\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.941943 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7htm\" (UniqueName: \"kubernetes.io/projected/4408c030-a5ac-49ae-9361-54cbe3c27108-kube-api-access-t7htm\") pod \"4408c030-a5ac-49ae-9361-54cbe3c27108\" (UID: \"4408c030-a5ac-49ae-9361-54cbe3c27108\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.941977 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9zrl\" (UniqueName: \"kubernetes.io/projected/9c3c49ff-cf53-4b5b-ba83-10877d499763-kube-api-access-d9zrl\") pod \"9c3c49ff-cf53-4b5b-ba83-10877d499763\" (UID: \"9c3c49ff-cf53-4b5b-ba83-10877d499763\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.941998 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aed636f4-272c-4379-a6f3-8247ae0e46cc-run-httpd\") pod \"aed636f4-272c-4379-a6f3-8247ae0e46cc\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.942028 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aed636f4-272c-4379-a6f3-8247ae0e46cc-log-httpd\") pod \"aed636f4-272c-4379-a6f3-8247ae0e46cc\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.942083 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c3c49ff-cf53-4b5b-ba83-10877d499763-config-data\") pod \"9c3c49ff-cf53-4b5b-ba83-10877d499763\" (UID: \"9c3c49ff-cf53-4b5b-ba83-10877d499763\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.942723 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-combined-ca-bundle\") pod \"aed636f4-272c-4379-a6f3-8247ae0e46cc\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.942774 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-public-tls-certs\") pod \"aed636f4-272c-4379-a6f3-8247ae0e46cc\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.942827 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m28hp\" (UniqueName: \"kubernetes.io/projected/aed636f4-272c-4379-a6f3-8247ae0e46cc-kube-api-access-m28hp\") pod \"aed636f4-272c-4379-a6f3-8247ae0e46cc\" (UID: \"aed636f4-272c-4379-a6f3-8247ae0e46cc\") "
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.952986 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aed636f4-272c-4379-a6f3-8247ae0e46cc-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "aed636f4-272c-4379-a6f3-8247ae0e46cc" (UID: "aed636f4-272c-4379-a6f3-8247ae0e46cc"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.959540 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-c2fe-account-create-update-r7tgg"]
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.960800 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4408c030-a5ac-49ae-9361-54cbe3c27108-kube-api-access-t7htm" (OuterVolumeSpecName: "kube-api-access-t7htm") pod "4408c030-a5ac-49ae-9361-54cbe3c27108" (UID: "4408c030-a5ac-49ae-9361-54cbe3c27108"). InnerVolumeSpecName "kube-api-access-t7htm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.961430 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aed636f4-272c-4379-a6f3-8247ae0e46cc-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "aed636f4-272c-4379-a6f3-8247ae0e46cc" (UID: "aed636f4-272c-4379-a6f3-8247ae0e46cc"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.965313 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ptz2q_6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf/ovn-controller/0.log"
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.965386 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ptz2q"
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.967609 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-c2fe-account-create-update-r7tgg"]
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.968415 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aed636f4-272c-4379-a6f3-8247ae0e46cc-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "aed636f4-272c-4379-a6f3-8247ae0e46cc" (UID: "aed636f4-272c-4379-a6f3-8247ae0e46cc"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.972162 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aed636f4-272c-4379-a6f3-8247ae0e46cc-kube-api-access-m28hp" (OuterVolumeSpecName: "kube-api-access-m28hp") pod "aed636f4-272c-4379-a6f3-8247ae0e46cc" (UID: "aed636f4-272c-4379-a6f3-8247ae0e46cc"). InnerVolumeSpecName "kube-api-access-m28hp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.975959 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c3c49ff-cf53-4b5b-ba83-10877d499763-kube-api-access-d9zrl" (OuterVolumeSpecName: "kube-api-access-d9zrl") pod "9c3c49ff-cf53-4b5b-ba83-10877d499763" (UID: "9c3c49ff-cf53-4b5b-ba83-10877d499763"). InnerVolumeSpecName "kube-api-access-d9zrl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.979893 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-nrvrw"]
Nov 26 14:44:06 crc kubenswrapper[5037]: I1126 14:44:06.990071 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-nrvrw"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.013603 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c3c49ff-cf53-4b5b-ba83-10877d499763-config-data" (OuterVolumeSpecName: "config-data") pod "9c3c49ff-cf53-4b5b-ba83-10877d499763" (UID: "9c3c49ff-cf53-4b5b-ba83-10877d499763"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.028731 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-259c-account-create-update-jrh9b"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.032801 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c3c49ff-cf53-4b5b-ba83-10877d499763-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9c3c49ff-cf53-4b5b-ba83-10877d499763" (UID: "9c3c49ff-cf53-4b5b-ba83-10877d499763"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.037034 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement259c-account-delete-rj8qs"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.044361 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4408c030-a5ac-49ae-9361-54cbe3c27108" (UID: "4408c030-a5ac-49ae-9361-54cbe3c27108"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.044937 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-run-ovn\") pod \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.044984 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-run\") pod \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045092 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5rjc\" (UniqueName: \"kubernetes.io/projected/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-kube-api-access-s5rjc\") pod \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045128 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-log-ovn\") pod \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045219 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-ovn-controller-tls-certs\") pod \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045271 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-combined-ca-bundle\") pod \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045353 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-scripts\") pod \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\" (UID: \"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045592 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-259c-account-create-update-jrh9b"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045808 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c3c49ff-cf53-4b5b-ba83-10877d499763-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045821 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7htm\" (UniqueName: \"kubernetes.io/projected/4408c030-a5ac-49ae-9361-54cbe3c27108-kube-api-access-t7htm\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045833 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9zrl\" (UniqueName: \"kubernetes.io/projected/9c3c49ff-cf53-4b5b-ba83-10877d499763-kube-api-access-d9zrl\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045842 5037 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aed636f4-272c-4379-a6f3-8247ae0e46cc-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045852 5037 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aed636f4-272c-4379-a6f3-8247ae0e46cc-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045862 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c3c49ff-cf53-4b5b-ba83-10877d499763-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045874 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m28hp\" (UniqueName: \"kubernetes.io/projected/aed636f4-272c-4379-a6f3-8247ae0e46cc-kube-api-access-m28hp\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045885 5037 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/aed636f4-272c-4379-a6f3-8247ae0e46cc-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.045896 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.046303 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" (UID: "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.046370 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" (UID: "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.046393 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-run" (OuterVolumeSpecName: "var-run") pod "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" (UID: "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.047966 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-scripts" (OuterVolumeSpecName: "scripts") pod "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" (UID: "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.049700 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-config-data" (OuterVolumeSpecName: "config-data") pod "4408c030-a5ac-49ae-9361-54cbe3c27108" (UID: "4408c030-a5ac-49ae-9361-54cbe3c27108"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.054412 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "4408c030-a5ac-49ae-9361-54cbe3c27108" (UID: "4408c030-a5ac-49ae-9361-54cbe3c27108"). InnerVolumeSpecName "vencrypt-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.054857 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-kube-api-access-s5rjc" (OuterVolumeSpecName: "kube-api-access-s5rjc") pod "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" (UID: "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf"). InnerVolumeSpecName "kube-api-access-s5rjc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.060396 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-config-data" (OuterVolumeSpecName: "config-data") pod "aed636f4-272c-4379-a6f3-8247ae0e46cc" (UID: "aed636f4-272c-4379-a6f3-8247ae0e46cc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.084197 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "aed636f4-272c-4379-a6f3-8247ae0e46cc" (UID: "aed636f4-272c-4379-a6f3-8247ae0e46cc"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.085623 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-4j78w"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.128606 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-4j78w"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.158152 5037 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-run\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.158193 5037 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.158209 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5rjc\" (UniqueName: \"kubernetes.io/projected/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-kube-api-access-s5rjc\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.158218 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.158229 5037 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-log-ovn\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.158241 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.158250 5037 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.158236 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aed636f4-272c-4379-a6f3-8247ae0e46cc" (UID: "aed636f4-272c-4379-a6f3-8247ae0e46cc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.159271 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-f9d3-account-create-update-9vvp4"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.158258 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.159481 5037 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-var-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.171389 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-f9d3-account-create-update-9vvp4"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.199672 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "4408c030-a5ac-49ae-9361-54cbe3c27108" (UID: "4408c030-a5ac-49ae-9361-54cbe3c27108"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.229250 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbicanf9d3-account-delete-qgkj4"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.235756 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="bf45bdb2-c880-43f7-b30a-4d1b36363f7d" containerName="galera" containerID="cri-o://df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de" gracePeriod=30
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.238111 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" (UID: "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf"). InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.258848 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-4lgf6"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.261175 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.261198 5037 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/4408c030-a5ac-49ae-9361-54cbe3c27108-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.261207 5037 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.263668 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-4lgf6"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.273337 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" (UID: "6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.277850 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-3cec-account-create-update-rnbdt"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.285910 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron3cec-account-delete-42tll"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.292963 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-3cec-account-create-update-rnbdt"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.293012 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "aed636f4-272c-4379-a6f3-8247ae0e46cc" (UID: "aed636f4-272c-4379-a6f3-8247ae0e46cc"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.300694 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-hsb4f"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.306553 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-hsb4f"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.311546 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-4d25-account-create-update-rrt49"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.317682 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-4d25-account-create-update-rrt49"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.329294 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance4d25-account-delete-pftxq"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.363643 5037 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aed636f4-272c-4379-a6f3-8247ae0e46cc-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.363688 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.375453 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.380473 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-f5wmh"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.382556 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.174:9292/healthcheck\": read tcp 10.217.0.2:34244->10.217.0.174:9292: read: connection reset by peer"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.382564 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.174:9292/healthcheck\": read tcp 10.217.0.2:34260->10.217.0.174:9292: read: connection reset by peer"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.397040 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell12e29-account-delete-vd2zr"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.408022 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-f5wmh"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.446022 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinderc68b-account-delete-rphsq"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.465642 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-c68b-account-create-update-8xxjl"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.480914 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-c68b-account-create-update-8xxjl"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.482051 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.566847 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-config-data\") pod \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\" (UID: \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.566969 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-combined-ca-bundle\") pod \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\" (UID: \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.567032 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03ffa609-b428-4a0e-8ec1-5c205391cf7b-operator-scripts\") pod \"03ffa609-b428-4a0e-8ec1-5c205391cf7b\" (UID: \"03ffa609-b428-4a0e-8ec1-5c205391cf7b\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.567166 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hrbtf\" (UniqueName: \"kubernetes.io/projected/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-kube-api-access-hrbtf\") pod \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\" (UID: \"dd47ce65-1426-47e2-a5d1-6efd83bac3ab\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.567209 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v9vm6\" (UniqueName: \"kubernetes.io/projected/03ffa609-b428-4a0e-8ec1-5c205391cf7b-kube-api-access-v9vm6\") pod \"03ffa609-b428-4a0e-8ec1-5c205391cf7b\" (UID: \"03ffa609-b428-4a0e-8ec1-5c205391cf7b\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.567718 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03ffa609-b428-4a0e-8ec1-5c205391cf7b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "03ffa609-b428-4a0e-8ec1-5c205391cf7b" (UID: "03ffa609-b428-4a0e-8ec1-5c205391cf7b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.570503 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-kube-api-access-hrbtf" (OuterVolumeSpecName: "kube-api-access-hrbtf") pod "dd47ce65-1426-47e2-a5d1-6efd83bac3ab" (UID: "dd47ce65-1426-47e2-a5d1-6efd83bac3ab"). InnerVolumeSpecName "kube-api-access-hrbtf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.570559 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03ffa609-b428-4a0e-8ec1-5c205391cf7b-kube-api-access-v9vm6" (OuterVolumeSpecName: "kube-api-access-v9vm6") pod "03ffa609-b428-4a0e-8ec1-5c205391cf7b" (UID: "03ffa609-b428-4a0e-8ec1-5c205391cf7b"). InnerVolumeSpecName "kube-api-access-v9vm6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.591761 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd47ce65-1426-47e2-a5d1-6efd83bac3ab" (UID: "dd47ce65-1426-47e2-a5d1-6efd83bac3ab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.598884 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-config-data" (OuterVolumeSpecName: "config-data") pod "dd47ce65-1426-47e2-a5d1-6efd83bac3ab" (UID: "dd47ce65-1426-47e2-a5d1-6efd83bac3ab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.605820 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-854dc8db7d-j5l6c"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.670488 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6j74\" (UniqueName: \"kubernetes.io/projected/fd171888-b656-4511-af7d-cdff1058bf5f-kube-api-access-q6j74\") pod \"fd171888-b656-4511-af7d-cdff1058bf5f\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.670590 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-kube-state-metrics-tls-certs\") pod \"fd171888-b656-4511-af7d-cdff1058bf5f\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.670640 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-kube-state-metrics-tls-config\") pod \"fd171888-b656-4511-af7d-cdff1058bf5f\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.670668 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-combined-ca-bundle\") pod \"fd171888-b656-4511-af7d-cdff1058bf5f\" (UID: \"fd171888-b656-4511-af7d-cdff1058bf5f\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.671272 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hrbtf\" (UniqueName: \"kubernetes.io/projected/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-kube-api-access-hrbtf\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.671315 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v9vm6\" (UniqueName: \"kubernetes.io/projected/03ffa609-b428-4a0e-8ec1-5c205391cf7b-kube-api-access-v9vm6\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.671329 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.671368 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd47ce65-1426-47e2-a5d1-6efd83bac3ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.671380 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03ffa609-b428-4a0e-8ec1-5c205391cf7b-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.674884 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd171888-b656-4511-af7d-cdff1058bf5f-kube-api-access-q6j74" (OuterVolumeSpecName: "kube-api-access-q6j74") pod "fd171888-b656-4511-af7d-cdff1058bf5f" (UID: "fd171888-b656-4511-af7d-cdff1058bf5f"). InnerVolumeSpecName "kube-api-access-q6j74". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.717778 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "fd171888-b656-4511-af7d-cdff1058bf5f" (UID: "fd171888-b656-4511-af7d-cdff1058bf5f"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.724571 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fd171888-b656-4511-af7d-cdff1058bf5f" (UID: "fd171888-b656-4511-af7d-cdff1058bf5f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.752201 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-m4hls"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.757530 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "fd171888-b656-4511-af7d-cdff1058bf5f" (UID: "fd171888-b656-4511-af7d-cdff1058bf5f"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.770633 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-m4hls"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.772128 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kggbc\" (UniqueName: \"kubernetes.io/projected/c2d75a18-6446-4558-af57-c6e0c957fc3b-kube-api-access-kggbc\") pod \"c2d75a18-6446-4558-af57-c6e0c957fc3b\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.772215 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-internal-tls-certs\") pod \"c2d75a18-6446-4558-af57-c6e0c957fc3b\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.772278 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-scripts\") pod \"c2d75a18-6446-4558-af57-c6e0c957fc3b\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.772321 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-config-data\") pod \"c2d75a18-6446-4558-af57-c6e0c957fc3b\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.772462 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-combined-ca-bundle\") pod \"c2d75a18-6446-4558-af57-c6e0c957fc3b\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.772496 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2d75a18-6446-4558-af57-c6e0c957fc3b-logs\") pod \"c2d75a18-6446-4558-af57-c6e0c957fc3b\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.772511 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-public-tls-certs\") pod \"c2d75a18-6446-4558-af57-c6e0c957fc3b\" (UID: \"c2d75a18-6446-4558-af57-c6e0c957fc3b\") "
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.772868 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6j74\" (UniqueName: \"kubernetes.io/projected/fd171888-b656-4511-af7d-cdff1058bf5f-kube-api-access-q6j74\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.772885 5037 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.772894 5037 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.772903 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd171888-b656-4511-af7d-cdff1058bf5f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.773642 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2d75a18-6446-4558-af57-c6e0c957fc3b-logs" (OuterVolumeSpecName: "logs") pod "c2d75a18-6446-4558-af57-c6e0c957fc3b" (UID: "c2d75a18-6446-4558-af57-c6e0c957fc3b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.775367 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2d75a18-6446-4558-af57-c6e0c957fc3b-kube-api-access-kggbc" (OuterVolumeSpecName: "kube-api-access-kggbc") pod "c2d75a18-6446-4558-af57-c6e0c957fc3b" (UID: "c2d75a18-6446-4558-af57-c6e0c957fc3b"). InnerVolumeSpecName "kube-api-access-kggbc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.776498 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-scripts" (OuterVolumeSpecName: "scripts") pod "c2d75a18-6446-4558-af57-c6e0c957fc3b" (UID: "c2d75a18-6446-4558-af57-c6e0c957fc3b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.778981 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-e6d6-account-create-update-jg9lh"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.785477 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0e6d6-account-delete-j5w7q"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.795595 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-e6d6-account-create-update-jg9lh"]
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.834475 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2d75a18-6446-4558-af57-c6e0c957fc3b" (UID: "c2d75a18-6446-4558-af57-c6e0c957fc3b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.874923 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.874955 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.874967 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2d75a18-6446-4558-af57-c6e0c957fc3b-logs\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.874977 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kggbc\" (UniqueName: \"kubernetes.io/projected/c2d75a18-6446-4558-af57-c6e0c957fc3b-kube-api-access-kggbc\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.899240 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-config-data" (OuterVolumeSpecName: "config-data") pod "c2d75a18-6446-4558-af57-c6e0c957fc3b" (UID: "c2d75a18-6446-4558-af57-c6e0c957fc3b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.901872 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"dd47ce65-1426-47e2-a5d1-6efd83bac3ab","Type":"ContainerDied","Data":"987c501de9a8908355bbdb779a488b1aae86858303bf80cd01d671d920bb722b"}
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.901928 5037 scope.go:117] "RemoveContainer" containerID="cafd25254996ab2af2a3389cdf0cdcd2a0d515e80e24d59e43ba4b1e34bf696b"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.902073 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.937953 5037 generic.go:334] "Generic (PLEG): container finished" podID="d4fd340f-f656-4ec3-aba1-a33eaa58aed0" containerID="3dcbf6703b46c5a8172c41249335b346fb201a3ba3a13f8712f5740aef9fb594" exitCode=0
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.959998 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.961142 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.961686 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e421f2b-ccc3-44f2-9646-c51aba1c5706" path="/var/lib/kubelet/pods/0e421f2b-ccc3-44f2-9646-c51aba1c5706/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.963679 5037 generic.go:334] "Generic (PLEG): container finished" podID="fd171888-b656-4511-af7d-cdff1058bf5f" containerID="9f6f522c179ad5b2c8f6d172e255b1812ec80e1f19a7f152457199761310283e" exitCode=2
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.963765 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.966602 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12c2cbda-612f-4d50-a8ff-a4a893fd62ea" path="/var/lib/kubelet/pods/12c2cbda-612f-4d50-a8ff-a4a893fd62ea/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.972005 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e" path="/var/lib/kubelet/pods/29d2e7ad-bebe-4d6e-8508-54dc55c7ca2e/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.972755 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30f3badb-48be-4d24-8f2a-7a3622f0f720" path="/var/lib/kubelet/pods/30f3badb-48be-4d24-8f2a-7a3622f0f720/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.973381 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ae25f93-9fa1-42e0-8a13-984460bdd087" path="/var/lib/kubelet/pods/3ae25f93-9fa1-42e0-8a13-984460bdd087/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.975106 5037 generic.go:334] "Generic (PLEG): container finished" podID="2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" containerID="38c16d94870f4c7e1e940ce264a006c31e02b0edcd3c7f37d2a4b79a7684dec0" exitCode=0
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.975206 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.977322 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7978b45fdd-7t6zc" podUID="334f3bb7-793e-4cff-b0ef-de24dc8a46b5" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.156:9311/healthcheck\": read tcp 10.217.0.2:39330->10.217.0.156:9311: read: connection reset by peer"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.977529 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7978b45fdd-7t6zc" podUID="334f3bb7-793e-4cff-b0ef-de24dc8a46b5" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.156:9311/healthcheck\": read tcp 10.217.0.2:39338->10.217.0.156:9311: read: connection reset by peer"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.978926 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.978930 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6520b35e-439b-4178-ad5e-9312d57c0fc5" path="/var/lib/kubelet/pods/6520b35e-439b-4178-ad5e-9312d57c0fc5/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.979779 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70ae4ec3-44f5-4978-ba4a-31d762f0d748" path="/var/lib/kubelet/pods/70ae4ec3-44f5-4978-ba4a-31d762f0d748/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.986603 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="791704a5-2365-4a7a-9cb5-5512a543aab2" path="/var/lib/kubelet/pods/791704a5-2365-4a7a-9cb5-5512a543aab2/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.987574 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9338ce30-fd3e-4fa1-bbc3-5f73cbba9662" path="/var/lib/kubelet/pods/9338ce30-fd3e-4fa1-bbc3-5f73cbba9662/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.988047 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a03d1e71-eb0c-4ec4-8d33-39535460bc50" path="/var/lib/kubelet/pods/a03d1e71-eb0c-4ec4-8d33-39535460bc50/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.988513 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9f650aa-da8e-4fe4-ab8f-980adc19129a" path="/var/lib/kubelet/pods/a9f650aa-da8e-4fe4-ab8f-980adc19129a/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.989532 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9a76d5f-5242-4fbf-a824-970d9ffcc3ad" path="/var/lib/kubelet/pods/b9a76d5f-5242-4fbf-a824-970d9ffcc3ad/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.989998 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be3a8314-c6a3-4c32-982e-47a36ea01821" path="/var/lib/kubelet/pods/be3a8314-c6a3-4c32-982e-47a36ea01821/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.990523 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6afbf28-95dd-4597-ac5d-f3735515e1b2" path="/var/lib/kubelet/pods/c6afbf28-95dd-4597-ac5d-f3735515e1b2/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.990972 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec74497e-0217-404a-8cb1-510ccc6cba50" path="/var/lib/kubelet/pods/ec74497e-0217-404a-8cb1-510ccc6cba50/volumes"
Nov 26 14:44:07 crc kubenswrapper[5037]: I1126 14:44:07.991989 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ece5ba73-c8c1-4d22-ac0c-411c8c59e969" path="/var/lib/kubelet/pods/ece5ba73-c8c1-4d22-ac0c-411c8c59e969/volumes"
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.001760 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell12e29-account-delete-vd2zr"
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.006305 5037 generic.go:334] "Generic (PLEG): container finished" podID="bdd4849b-e92e-473d-88d0-74c060c04eb7" containerID="ac800c71f24567e467410fb7c333d7691707d518b6f0f84492f89244a18f9205" exitCode=0
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.025442 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c2d75a18-6446-4558-af57-c6e0c957fc3b" (UID: "c2d75a18-6446-4558-af57-c6e0c957fc3b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.032986 5037 generic.go:334] "Generic (PLEG): container finished" podID="c2d75a18-6446-4558-af57-c6e0c957fc3b" containerID="ae38d038fad3bbc384e79c4d7f1e060c20c2d38b3e29519ec6a7891fc4ff742b" exitCode=0
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.033108 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-854dc8db7d-j5l6c"
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.040029 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7f55999cfc-jx9r6"
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.066798 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": read tcp 10.217.0.2:53256->10.217.0.198:8775: read: connection reset by peer"
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.067176 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": read tcp 10.217.0.2:53246->10.217.0.198:8775: read: connection reset by peer"
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.084573 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-httpd-run\") pod \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") "
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.084708 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-scripts\") pod \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") "
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.084742 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-combined-ca-bundle\") pod \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") "
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.084782 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zx96r\" (UniqueName: \"kubernetes.io/projected/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-kube-api-access-zx96r\") pod \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") "
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.084827 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-internal-tls-certs\") pod \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") "
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.084862 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") "
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.084906 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-config-data\") pod \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") "
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.084941 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-logs\") pod \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\" (UID: \"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e\") "
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.085338 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" (UID: "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.086186 5037 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.086207 5037 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.086795 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-logs" (OuterVolumeSpecName: "logs") pod "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" (UID: "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.089516 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-kube-api-access-zx96r" (OuterVolumeSpecName: "kube-api-access-zx96r") pod "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" (UID: "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e"). InnerVolumeSpecName "kube-api-access-zx96r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.093068 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.101892 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ptz2q_6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf/ovn-controller/0.log"
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.102077 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ptz2q"
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.113788 5037 generic.go:334] "Generic (PLEG): container finished" podID="5b53df32-369f-4a91-bb97-5da067cc3c6a" containerID="79410aa1ae42d0ccbb1f24403b971fbdf2b68b70ddace0cddb3256c93023bbb4" exitCode=0
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.133914 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" (UID: "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.135510 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-scripts" (OuterVolumeSpecName: "scripts") pod "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" (UID: "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e"). InnerVolumeSpecName "scripts".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.160733 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="07720f90-b6f7-4b81-9c32-17f1e72b19fa" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.162:8776/healthcheck\": read tcp 10.217.0.2:54314->10.217.0.162:8776: read: connection reset by peer" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.168117 5037 generic.go:334] "Generic (PLEG): container finished" podID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerID="4b66f0aa9cd359a08c2fc701fbd668ab4e74119711476d65846830c7024d146e" exitCode=0 Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.168149 5037 generic.go:334] "Generic (PLEG): container finished" podID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerID="e755b8c60d9bc3fba924bee940809c862f89fc6885ca06dec0c4232e6e6116ba" exitCode=2 Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.168158 5037 generic.go:334] "Generic (PLEG): container finished" podID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerID="d9e7e3cabf68f8c77fe540ee66229c4f639270ea37015bbb512cd6402e09b909" exitCode=0 Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.183745 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c2d75a18-6446-4558-af57-c6e0c957fc3b" (UID: "c2d75a18-6446-4558-af57-c6e0c957fc3b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.199718 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.199749 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zx96r\" (UniqueName: \"kubernetes.io/projected/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-kube-api-access-zx96r\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.199761 5037 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2d75a18-6446-4558-af57-c6e0c957fc3b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.199781 5037 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.199792 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.233599 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" (UID: "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.256125 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" (UID: "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.271149 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-config-data" (OuterVolumeSpecName: "config-data") pod "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" (UID: "2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.272562 5037 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.308487 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.308514 5037 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.308522 5037 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.308531 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.604834 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0e6d6-account-delete-j5w7q" event={"ID":"d4fd340f-f656-4ec3-aba1-a33eaa58aed0","Type":"ContainerDied","Data":"3dcbf6703b46c5a8172c41249335b346fb201a3ba3a13f8712f5740aef9fb594"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.604893 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9c3c49ff-cf53-4b5b-ba83-10877d499763","Type":"ContainerDied","Data":"38e70d5032c83e2af81154618fd9ef141af24eb0ab39c42a6c5f5f453ba0b9af"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.604914 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-5zbnt"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.604932 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"fd171888-b656-4511-af7d-cdff1058bf5f","Type":"ContainerDied","Data":"9f6f522c179ad5b2c8f6d172e255b1812ec80e1f19a7f152457199761310283e"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.604971 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-5zbnt"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.604991 5037 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"fd171888-b656-4511-af7d-cdff1058bf5f","Type":"ContainerDied","Data":"d4e2dc7ebb7a8511c34726eae35d408c89b1328abbddcd1cf083a81227b4e83d"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605006 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapieb2b-account-delete-988tl"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605022 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-eb2b-account-create-update-92xtm"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605058 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e","Type":"ContainerDied","Data":"38c16d94870f4c7e1e940ce264a006c31e02b0edcd3c7f37d2a4b79a7684dec0"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605073 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell12e29-account-delete-vd2zr" event={"ID":"03ffa609-b428-4a0e-8ec1-5c205391cf7b","Type":"ContainerDied","Data":"07efdc2b707f4665d7954a7295427640db0b0cc66e0a067b1b9c787a54fd5add"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605076 5037 scope.go:117] "RemoveContainer" containerID="b37dad75bebb2e8ff92fd84a2c83e4b7a2ff235be32f88191ebf7baf5089d611" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605087 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-eb2b-account-create-update-92xtm"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605321 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"bdd4849b-e92e-473d-88d0-74c060c04eb7","Type":"ContainerDied","Data":"ac800c71f24567e467410fb7c333d7691707d518b6f0f84492f89244a18f9205"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605339 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605354 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605372 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-854dc8db7d-j5l6c" event={"ID":"c2d75a18-6446-4558-af57-c6e0c957fc3b","Type":"ContainerDied","Data":"ae38d038fad3bbc384e79c4d7f1e060c20c2d38b3e29519ec6a7891fc4ff742b"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605384 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-854dc8db7d-j5l6c" event={"ID":"c2d75a18-6446-4558-af57-c6e0c957fc3b","Type":"ContainerDied","Data":"23c68f217ca0c448f87dae9e2be4b25a2fb59c81999e180f1fb5799aa26a1180"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605398 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f55999cfc-jx9r6" event={"ID":"aed636f4-272c-4379-a6f3-8247ae0e46cc","Type":"ContainerDied","Data":"f1391f198bbb2adeeea1f5c8a5548c186c4cc57e19e6dd816b8890f1380cf087"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605464 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"4408c030-a5ac-49ae-9361-54cbe3c27108","Type":"ContainerDied","Data":"33d3a546ce44fa80f3ce312259e709179bc4d386efc53586e603557016ee9221"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605481 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ptz2q" 
event={"ID":"6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf","Type":"ContainerDied","Data":"18687e603da421cde9f2a22d55f4b6415ac7f69c74a6c269c2132a301d73bcab"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605492 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance4d25-account-delete-pftxq" event={"ID":"5b53df32-369f-4a91-bb97-5da067cc3c6a","Type":"ContainerDied","Data":"79410aa1ae42d0ccbb1f24403b971fbdf2b68b70ddace0cddb3256c93023bbb4"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605502 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b90229c-2a39-4627-896f-9c1b27e4f1d5","Type":"ContainerDied","Data":"4b66f0aa9cd359a08c2fc701fbd668ab4e74119711476d65846830c7024d146e"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605512 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b90229c-2a39-4627-896f-9c1b27e4f1d5","Type":"ContainerDied","Data":"e755b8c60d9bc3fba924bee940809c862f89fc6885ca06dec0c4232e6e6116ba"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.605521 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b90229c-2a39-4627-896f-9c1b27e4f1d5","Type":"ContainerDied","Data":"d9e7e3cabf68f8c77fe540ee66229c4f639270ea37015bbb512cd6402e09b909"} Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.672654 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.689546 5037 scope.go:117] "RemoveContainer" containerID="9f6f522c179ad5b2c8f6d172e255b1812ec80e1f19a7f152457199761310283e" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.715498 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.744145 5037 scope.go:117] "RemoveContainer" containerID="9f6f522c179ad5b2c8f6d172e255b1812ec80e1f19a7f152457199761310283e" Nov 26 14:44:08 crc kubenswrapper[5037]: E1126 14:44:08.745071 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f6f522c179ad5b2c8f6d172e255b1812ec80e1f19a7f152457199761310283e\": container with ID starting with 9f6f522c179ad5b2c8f6d172e255b1812ec80e1f19a7f152457199761310283e not found: ID does not exist" containerID="9f6f522c179ad5b2c8f6d172e255b1812ec80e1f19a7f152457199761310283e" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.745113 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f6f522c179ad5b2c8f6d172e255b1812ec80e1f19a7f152457199761310283e"} err="failed to get container status \"9f6f522c179ad5b2c8f6d172e255b1812ec80e1f19a7f152457199761310283e\": rpc error: code = NotFound desc = could not find container \"9f6f522c179ad5b2c8f6d172e255b1812ec80e1f19a7f152457199761310283e\": container with ID starting with 9f6f522c179ad5b2c8f6d172e255b1812ec80e1f19a7f152457199761310283e not found: ID does not exist" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.745143 5037 scope.go:117] "RemoveContainer" containerID="38c16d94870f4c7e1e940ce264a006c31e02b0edcd3c7f37d2a4b79a7684dec0" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.751050 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ptz2q"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.760119 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ptz2q"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.761393 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.799378 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.816807 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdd4849b-e92e-473d-88d0-74c060c04eb7-combined-ca-bundle\") pod \"bdd4849b-e92e-473d-88d0-74c060c04eb7\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.816864 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/300dce8f-4337-4707-8075-f32b93f03e4f-config-data-generated\") pod \"300dce8f-4337-4707-8075-f32b93f03e4f\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.816894 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bdd4849b-e92e-473d-88d0-74c060c04eb7-kolla-config\") pod \"bdd4849b-e92e-473d-88d0-74c060c04eb7\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.816926 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-operator-scripts\") pod \"300dce8f-4337-4707-8075-f32b93f03e4f\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " Nov 26 14:44:08 crc 
kubenswrapper[5037]: I1126 14:44:08.816961 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-kolla-config\") pod \"300dce8f-4337-4707-8075-f32b93f03e4f\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.817001 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmtdx\" (UniqueName: \"kubernetes.io/projected/bdd4849b-e92e-473d-88d0-74c060c04eb7-kube-api-access-xmtdx\") pod \"bdd4849b-e92e-473d-88d0-74c060c04eb7\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.817033 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdd4849b-e92e-473d-88d0-74c060c04eb7-memcached-tls-certs\") pod \"bdd4849b-e92e-473d-88d0-74c060c04eb7\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.817091 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6gnn\" (UniqueName: \"kubernetes.io/projected/300dce8f-4337-4707-8075-f32b93f03e4f-kube-api-access-m6gnn\") pod \"300dce8f-4337-4707-8075-f32b93f03e4f\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.817116 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-config-data-default\") pod \"300dce8f-4337-4707-8075-f32b93f03e4f\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.817147 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/300dce8f-4337-4707-8075-f32b93f03e4f-galera-tls-certs\") pod \"300dce8f-4337-4707-8075-f32b93f03e4f\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.817199 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"300dce8f-4337-4707-8075-f32b93f03e4f\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.817238 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bdd4849b-e92e-473d-88d0-74c060c04eb7-config-data\") pod \"bdd4849b-e92e-473d-88d0-74c060c04eb7\" (UID: \"bdd4849b-e92e-473d-88d0-74c060c04eb7\") " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.817273 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300dce8f-4337-4707-8075-f32b93f03e4f-combined-ca-bundle\") pod \"300dce8f-4337-4707-8075-f32b93f03e4f\" (UID: \"300dce8f-4337-4707-8075-f32b93f03e4f\") " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.821067 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "300dce8f-4337-4707-8075-f32b93f03e4f" (UID: "300dce8f-4337-4707-8075-f32b93f03e4f"). 
InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.847661 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "300dce8f-4337-4707-8075-f32b93f03e4f" (UID: "300dce8f-4337-4707-8075-f32b93f03e4f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.848605 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdd4849b-e92e-473d-88d0-74c060c04eb7-kube-api-access-xmtdx" (OuterVolumeSpecName: "kube-api-access-xmtdx") pod "bdd4849b-e92e-473d-88d0-74c060c04eb7" (UID: "bdd4849b-e92e-473d-88d0-74c060c04eb7"). InnerVolumeSpecName "kube-api-access-xmtdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.849575 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bdd4849b-e92e-473d-88d0-74c060c04eb7-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "bdd4849b-e92e-473d-88d0-74c060c04eb7" (UID: "bdd4849b-e92e-473d-88d0-74c060c04eb7"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.849964 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/300dce8f-4337-4707-8075-f32b93f03e4f-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "300dce8f-4337-4707-8075-f32b93f03e4f" (UID: "300dce8f-4337-4707-8075-f32b93f03e4f"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.850005 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.851787 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bdd4849b-e92e-473d-88d0-74c060c04eb7-config-data" (OuterVolumeSpecName: "config-data") pod "bdd4849b-e92e-473d-88d0-74c060c04eb7" (UID: "bdd4849b-e92e-473d-88d0-74c060c04eb7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.851845 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/300dce8f-4337-4707-8075-f32b93f03e4f-kube-api-access-m6gnn" (OuterVolumeSpecName: "kube-api-access-m6gnn") pod "300dce8f-4337-4707-8075-f32b93f03e4f" (UID: "300dce8f-4337-4707-8075-f32b93f03e4f"). InnerVolumeSpecName "kube-api-access-m6gnn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.852042 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "300dce8f-4337-4707-8075-f32b93f03e4f" (UID: "300dce8f-4337-4707-8075-f32b93f03e4f"). InnerVolumeSpecName "kolla-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.876493 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdd4849b-e92e-473d-88d0-74c060c04eb7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bdd4849b-e92e-473d-88d0-74c060c04eb7" (UID: "bdd4849b-e92e-473d-88d0-74c060c04eb7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.881014 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.887931 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "mysql-db") pod "300dce8f-4337-4707-8075-f32b93f03e4f" (UID: "300dce8f-4337-4707-8075-f32b93f03e4f"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.888466 5037 scope.go:117] "RemoveContainer" containerID="7193d230cf98d6ea21211158364885315519bd51ee3bfb69a0d77702bb8f27cf" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.889894 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.897939 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.905116 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.911936 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.919213 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bdd4849b-e92e-473d-88d0-74c060c04eb7-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.919239 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdd4849b-e92e-473d-88d0-74c060c04eb7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.919250 5037 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/300dce8f-4337-4707-8075-f32b93f03e4f-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.919258 5037 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bdd4849b-e92e-473d-88d0-74c060c04eb7-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.919267 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.919274 5037 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc 
kubenswrapper[5037]: I1126 14:44:08.919309 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmtdx\" (UniqueName: \"kubernetes.io/projected/bdd4849b-e92e-473d-88d0-74c060c04eb7-kube-api-access-xmtdx\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.919319 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6gnn\" (UniqueName: \"kubernetes.io/projected/300dce8f-4337-4707-8075-f32b93f03e4f-kube-api-access-m6gnn\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.919327 5037 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/300dce8f-4337-4707-8075-f32b93f03e4f-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.919347 5037 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.930020 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/300dce8f-4337-4707-8075-f32b93f03e4f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "300dce8f-4337-4707-8075-f32b93f03e4f" (UID: "300dce8f-4337-4707-8075-f32b93f03e4f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.930109 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-7f55999cfc-jx9r6"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.931040 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-7f55999cfc-jx9r6"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.941409 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/300dce8f-4337-4707-8075-f32b93f03e4f-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "300dce8f-4337-4707-8075-f32b93f03e4f" (UID: "300dce8f-4337-4707-8075-f32b93f03e4f"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.943440 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell12e29-account-delete-vd2zr"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.961920 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell12e29-account-delete-vd2zr"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.968519 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdd4849b-e92e-473d-88d0-74c060c04eb7-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "bdd4849b-e92e-473d-88d0-74c060c04eb7" (UID: "bdd4849b-e92e-473d-88d0-74c060c04eb7"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.976807 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-854dc8db7d-j5l6c"] Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.978332 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:44:08 crc kubenswrapper[5037]: I1126 14:44:08.980066 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement259c-account-delete-rj8qs" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.001391 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-854dc8db7d-j5l6c"] Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.020974 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300dce8f-4337-4707-8075-f32b93f03e4f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.021008 5037 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdd4849b-e92e-473d-88d0-74c060c04eb7-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.021019 5037 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/300dce8f-4337-4707-8075-f32b93f03e4f-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.027368 5037 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.027680 5037 scope.go:117] "RemoveContainer" containerID="771c3c79a09cab53a7b40d533ff448b485921c9b6c6b640a07a8ddd8e73a21e8" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.121964 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-logs\") pod \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.122088 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-config-data-custom\") pod \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.122141 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-combined-ca-bundle\") pod \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.122167 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-public-tls-certs\") pod \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.122182 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-internal-tls-certs\") pod \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.122205 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7ece585-54a5-40d4-866f-98c968f03910-operator-scripts\") pod \"a7ece585-54a5-40d4-866f-98c968f03910\" (UID: \"a7ece585-54a5-40d4-866f-98c968f03910\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.122231 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgrm7\" (UniqueName: \"kubernetes.io/projected/a7ece585-54a5-40d4-866f-98c968f03910-kube-api-access-jgrm7\") pod \"a7ece585-54a5-40d4-866f-98c968f03910\" (UID: \"a7ece585-54a5-40d4-866f-98c968f03910\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.122270 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-295fx\" (UniqueName: \"kubernetes.io/projected/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-kube-api-access-295fx\") pod \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.122506 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-config-data\") pod \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\" (UID: \"334f3bb7-793e-4cff-b0ef-de24dc8a46b5\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.123000 5037 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.124026 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-logs" (OuterVolumeSpecName: "logs") pod "334f3bb7-793e-4cff-b0ef-de24dc8a46b5" (UID: "334f3bb7-793e-4cff-b0ef-de24dc8a46b5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.124376 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7ece585-54a5-40d4-866f-98c968f03910-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a7ece585-54a5-40d4-866f-98c968f03910" (UID: "a7ece585-54a5-40d4-866f-98c968f03910"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.128250 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-kube-api-access-295fx" (OuterVolumeSpecName: "kube-api-access-295fx") pod "334f3bb7-793e-4cff-b0ef-de24dc8a46b5" (UID: "334f3bb7-793e-4cff-b0ef-de24dc8a46b5"). InnerVolumeSpecName "kube-api-access-295fx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.129591 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "334f3bb7-793e-4cff-b0ef-de24dc8a46b5" (UID: "334f3bb7-793e-4cff-b0ef-de24dc8a46b5"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.130900 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7ece585-54a5-40d4-866f-98c968f03910-kube-api-access-jgrm7" (OuterVolumeSpecName: "kube-api-access-jgrm7") pod "a7ece585-54a5-40d4-866f-98c968f03910" (UID: "a7ece585-54a5-40d4-866f-98c968f03910"). InnerVolumeSpecName "kube-api-access-jgrm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.158862 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "334f3bb7-793e-4cff-b0ef-de24dc8a46b5" (UID: "334f3bb7-793e-4cff-b0ef-de24dc8a46b5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.182460 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-config-data" (OuterVolumeSpecName: "config-data") pod "334f3bb7-793e-4cff-b0ef-de24dc8a46b5" (UID: "334f3bb7-793e-4cff-b0ef-de24dc8a46b5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.184735 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapieb2b-account-delete-988tl" event={"ID":"8df4197d-046b-4b35-a14a-b382bda46242","Type":"ContainerDied","Data":"d72a3e29fa0941334f039a0f517223c2c8372f404345b575bc7e1b78606d6be3"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.184777 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d72a3e29fa0941334f039a0f517223c2c8372f404345b575bc7e1b78606d6be3" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.184846 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "334f3bb7-793e-4cff-b0ef-de24dc8a46b5" (UID: "334f3bb7-793e-4cff-b0ef-de24dc8a46b5"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.214459 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement259c-account-delete-rj8qs" event={"ID":"a7ece585-54a5-40d4-866f-98c968f03910","Type":"ContainerDied","Data":"ea907abbcf61858031d578ef9e3277159eecf177979939e7289a8d1fb98b9156"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.214492 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement259c-account-delete-rj8qs" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.214503 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea907abbcf61858031d578ef9e3277159eecf177979939e7289a8d1fb98b9156" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.214935 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "334f3bb7-793e-4cff-b0ef-de24dc8a46b5" (UID: "334f3bb7-793e-4cff-b0ef-de24dc8a46b5"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.225092 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.225117 5037 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.225132 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.225142 5037 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.225153 5037 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.225143 5037 generic.go:334] "Generic (PLEG): container finished" podID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerID="33dcd0b34b2f2fdf22fdb535aa2524ac7c392d11aebfe3891b1a520355c97e29" exitCode=0 Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.225191 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e","Type":"ContainerDied","Data":"33dcd0b34b2f2fdf22fdb535aa2524ac7c392d11aebfe3891b1a520355c97e29"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.225229 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e","Type":"ContainerDied","Data":"8ab7134009058e52030c6f00fcc6be72d410ef8854fda4b58df18f65f38fabcf"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.225241 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ab7134009058e52030c6f00fcc6be72d410ef8854fda4b58df18f65f38fabcf" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.225165 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7ece585-54a5-40d4-866f-98c968f03910-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.225277 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgrm7\" (UniqueName: \"kubernetes.io/projected/a7ece585-54a5-40d4-866f-98c968f03910-kube-api-access-jgrm7\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.225337 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-295fx\" (UniqueName: \"kubernetes.io/projected/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-kube-api-access-295fx\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.225353 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/334f3bb7-793e-4cff-b0ef-de24dc8a46b5-config-data\") on node \"crc\" DevicePath 
\"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.229605 5037 generic.go:334] "Generic (PLEG): container finished" podID="334f3bb7-793e-4cff-b0ef-de24dc8a46b5" containerID="2f6e30bd74ea66c491e2959c075dfac83aa041657baf37104388b43c5d325007" exitCode=0 Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.229724 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7978b45fdd-7t6zc" event={"ID":"334f3bb7-793e-4cff-b0ef-de24dc8a46b5","Type":"ContainerDied","Data":"2f6e30bd74ea66c491e2959c075dfac83aa041657baf37104388b43c5d325007"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.229819 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7978b45fdd-7t6zc" event={"ID":"334f3bb7-793e-4cff-b0ef-de24dc8a46b5","Type":"ContainerDied","Data":"8556bd1ed449e1303e1a497174956d6682f8d0558538d369de30eb6d4ea4b300"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.229926 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7978b45fdd-7t6zc" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.238594 5037 generic.go:334] "Generic (PLEG): container finished" podID="cff988a9-69e2-42cc-a456-426f13be8a58" containerID="cb5de0febf4f6869c6113a77abea3425966e60873437776dd7f265ea84cd9709" exitCode=0 Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.238664 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cff988a9-69e2-42cc-a456-426f13be8a58","Type":"ContainerDied","Data":"cb5de0febf4f6869c6113a77abea3425966e60873437776dd7f265ea84cd9709"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.244644 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"bdd4849b-e92e-473d-88d0-74c060c04eb7","Type":"ContainerDied","Data":"e0cff50483b1be8a51565b59ec847e38294733a3692b2001252edeee0e2dc5bf"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.244736 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.257210 5037 generic.go:334] "Generic (PLEG): container finished" podID="6d49cc40-ce20-415f-a979-398430c2bd81" containerID="08aa4f4dbe17185b559c1307060da7ba09ed7694916c81cee021536293b3f886" exitCode=0 Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.257316 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c767587b5-nzlv9" event={"ID":"6d49cc40-ce20-415f-a979-398430c2bd81","Type":"ContainerDied","Data":"08aa4f4dbe17185b559c1307060da7ba09ed7694916c81cee021536293b3f886"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.257370 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c767587b5-nzlv9" event={"ID":"6d49cc40-ce20-415f-a979-398430c2bd81","Type":"ContainerDied","Data":"dcb533ef817930e484aa34688ca5ad57eb45ee0e14ade15d278098b83b1288a0"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.257387 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dcb533ef817930e484aa34688ca5ad57eb45ee0e14ade15d278098b83b1288a0" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.261798 5037 generic.go:334] "Generic (PLEG): container finished" podID="300dce8f-4337-4707-8075-f32b93f03e4f" containerID="b153954d737c10034799ce4a540d151fbb5420d7fc983a9615870845e76fb0be" exitCode=0 Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.261846 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"300dce8f-4337-4707-8075-f32b93f03e4f","Type":"ContainerDied","Data":"b153954d737c10034799ce4a540d151fbb5420d7fc983a9615870845e76fb0be"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.261875 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"300dce8f-4337-4707-8075-f32b93f03e4f","Type":"ContainerDied","Data":"c21db9721ff60f887a92e254c47928c31b0ebd15e9dd0d260534a5c1184d00c8"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.261949 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.273317 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbicanf9d3-account-delete-qgkj4" event={"ID":"b3dc5e2c-0729-4f4d-8481-bd8fb0064a80","Type":"ContainerDied","Data":"f00b77b3c11461b44434170237716153a3fb1bc043b240f76ba063db66fd2cfd"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.273348 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f00b77b3c11461b44434170237716153a3fb1bc043b240f76ba063db66fd2cfd" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.278231 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapieb2b-account-delete-988tl" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.303972 5037 scope.go:117] "RemoveContainer" containerID="ae38d038fad3bbc384e79c4d7f1e060c20c2d38b3e29519ec6a7891fc4ff742b" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.304831 5037 generic.go:334] "Generic (PLEG): container finished" podID="07720f90-b6f7-4b81-9c32-17f1e72b19fa" containerID="6c728b7a4bd6db17ff62032233cd9d220168f2c76bace60a7590b7b669f9d433" exitCode=0 Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.304893 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"07720f90-b6f7-4b81-9c32-17f1e72b19fa","Type":"ContainerDied","Data":"6c728b7a4bd6db17ff62032233cd9d220168f2c76bace60a7590b7b669f9d433"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.304920 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"07720f90-b6f7-4b81-9c32-17f1e72b19fa","Type":"ContainerDied","Data":"201bbd8e2fb4fdbe2c467d4eec9082b94959a51d56c3d72326b1e23bd67261c2"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.304932 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="201bbd8e2fb4fdbe2c467d4eec9082b94959a51d56c3d72326b1e23bd67261c2" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.311690 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinderc68b-account-delete-rphsq" event={"ID":"10cd5eda-54cc-4c0a-91ca-4f8217e5220e","Type":"ContainerDied","Data":"999353903a6393e18d00372d1f68498c83ff83a66141a099c6f2985c61b7ce94"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.311713 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="999353903a6393e18d00372d1f68498c83ff83a66141a099c6f2985c61b7ce94" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.319215 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron3cec-account-delete-42tll" event={"ID":"b0797697-2b6d-4684-9fe1-e17a91f80369","Type":"ContainerDied","Data":"81f4258351ce40c54496f1c4d7aaa812e753dfd8441f5317cddb0ce60ce82d98"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.319239 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81f4258351ce40c54496f1c4d7aaa812e753dfd8441f5317cddb0ce60ce82d98" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.329718 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron3cec-account-delete-42tll" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.334045 5037 generic.go:334] "Generic (PLEG): container finished" podID="693d1a99-bf33-42ee-adea-2f8ce0f6c002" containerID="69f2b0b56cf2f3be40f2a859173b22b913b09b1aec2185348206ae5ef68d4747" exitCode=0 Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.334351 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"693d1a99-bf33-42ee-adea-2f8ce0f6c002","Type":"ContainerDied","Data":"69f2b0b56cf2f3be40f2a859173b22b913b09b1aec2185348206ae5ef68d4747"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.334390 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"693d1a99-bf33-42ee-adea-2f8ce0f6c002","Type":"ContainerDied","Data":"8059a452772362925551c8d75ae0d9eebb593c0efe33628aaa1de6f5a5389f15"} Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.334413 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8059a452772362925551c8d75ae0d9eebb593c0efe33628aaa1de6f5a5389f15" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.348594 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement259c-account-delete-rj8qs"] Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.369849 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbicanf9d3-account-delete-qgkj4" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.384601 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.385957 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinderc68b-account-delete-rphsq" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.404410 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.416310 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement259c-account-delete-rj8qs"] Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.418824 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.444980 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8df4197d-046b-4b35-a14a-b382bda46242-operator-scripts\") pod \"8df4197d-046b-4b35-a14a-b382bda46242\" (UID: \"8df4197d-046b-4b35-a14a-b382bda46242\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.445178 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7d82\" (UniqueName: \"kubernetes.io/projected/b0797697-2b6d-4684-9fe1-e17a91f80369-kube-api-access-s7d82\") pod \"b0797697-2b6d-4684-9fe1-e17a91f80369\" (UID: \"b0797697-2b6d-4684-9fe1-e17a91f80369\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.445668 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4bs4\" (UniqueName: \"kubernetes.io/projected/8df4197d-046b-4b35-a14a-b382bda46242-kube-api-access-w4bs4\") pod \"8df4197d-046b-4b35-a14a-b382bda46242\" (UID: \"8df4197d-046b-4b35-a14a-b382bda46242\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.445699 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0797697-2b6d-4684-9fe1-e17a91f80369-operator-scripts\") pod \"b0797697-2b6d-4684-9fe1-e17a91f80369\" (UID: \"b0797697-2b6d-4684-9fe1-e17a91f80369\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.448046 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0797697-2b6d-4684-9fe1-e17a91f80369-kube-api-access-s7d82" (OuterVolumeSpecName: "kube-api-access-s7d82") pod "b0797697-2b6d-4684-9fe1-e17a91f80369" (UID: "b0797697-2b6d-4684-9fe1-e17a91f80369"). InnerVolumeSpecName "kube-api-access-s7d82". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.448481 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0797697-2b6d-4684-9fe1-e17a91f80369-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b0797697-2b6d-4684-9fe1-e17a91f80369" (UID: "b0797697-2b6d-4684-9fe1-e17a91f80369"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.449545 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8df4197d-046b-4b35-a14a-b382bda46242-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8df4197d-046b-4b35-a14a-b382bda46242" (UID: "8df4197d-046b-4b35-a14a-b382bda46242"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.450459 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0797697-2b6d-4684-9fe1-e17a91f80369-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.450480 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8df4197d-046b-4b35-a14a-b382bda46242-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.450489 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7d82\" (UniqueName: \"kubernetes.io/projected/b0797697-2b6d-4684-9fe1-e17a91f80369-kube-api-access-s7d82\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.452281 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8df4197d-046b-4b35-a14a-b382bda46242-kube-api-access-w4bs4" (OuterVolumeSpecName: "kube-api-access-w4bs4") pod "8df4197d-046b-4b35-a14a-b382bda46242" (UID: "8df4197d-046b-4b35-a14a-b382bda46242"). InnerVolumeSpecName "kube-api-access-w4bs4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.459749 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.463739 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.482767 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.494700 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7978b45fdd-7t6zc"] Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.502569 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-7978b45fdd-7t6zc"] Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.512356 5037 scope.go:117] "RemoveContainer" containerID="98e55e6e1008fca6ad27dbc8db97cf30687f5fe52197409ea9b8d138f9f80df2" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.518517 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.531168 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.544932 5037 scope.go:117] "RemoveContainer" containerID="ae38d038fad3bbc384e79c4d7f1e060c20c2d38b3e29519ec6a7891fc4ff742b" Nov 26 14:44:09 crc kubenswrapper[5037]: E1126 14:44:09.546075 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae38d038fad3bbc384e79c4d7f1e060c20c2d38b3e29519ec6a7891fc4ff742b\": container with ID starting with ae38d038fad3bbc384e79c4d7f1e060c20c2d38b3e29519ec6a7891fc4ff742b not found: ID does not exist" containerID="ae38d038fad3bbc384e79c4d7f1e060c20c2d38b3e29519ec6a7891fc4ff742b" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.546113 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae38d038fad3bbc384e79c4d7f1e060c20c2d38b3e29519ec6a7891fc4ff742b"} err="failed to get container status 
\"ae38d038fad3bbc384e79c4d7f1e060c20c2d38b3e29519ec6a7891fc4ff742b\": rpc error: code = NotFound desc = could not find container \"ae38d038fad3bbc384e79c4d7f1e060c20c2d38b3e29519ec6a7891fc4ff742b\": container with ID starting with ae38d038fad3bbc384e79c4d7f1e060c20c2d38b3e29519ec6a7891fc4ff742b not found: ID does not exist" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.546136 5037 scope.go:117] "RemoveContainer" containerID="98e55e6e1008fca6ad27dbc8db97cf30687f5fe52197409ea9b8d138f9f80df2" Nov 26 14:44:09 crc kubenswrapper[5037]: E1126 14:44:09.548549 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98e55e6e1008fca6ad27dbc8db97cf30687f5fe52197409ea9b8d138f9f80df2\": container with ID starting with 98e55e6e1008fca6ad27dbc8db97cf30687f5fe52197409ea9b8d138f9f80df2 not found: ID does not exist" containerID="98e55e6e1008fca6ad27dbc8db97cf30687f5fe52197409ea9b8d138f9f80df2" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.548589 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98e55e6e1008fca6ad27dbc8db97cf30687f5fe52197409ea9b8d138f9f80df2"} err="failed to get container status \"98e55e6e1008fca6ad27dbc8db97cf30687f5fe52197409ea9b8d138f9f80df2\": rpc error: code = NotFound desc = could not find container \"98e55e6e1008fca6ad27dbc8db97cf30687f5fe52197409ea9b8d138f9f80df2\": container with ID starting with 98e55e6e1008fca6ad27dbc8db97cf30687f5fe52197409ea9b8d138f9f80df2 not found: ID does not exist" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.548610 5037 scope.go:117] "RemoveContainer" containerID="2019faaa0d00a4dce9f4ce3484825a1f6132bcb7d194dfe843a8fd57678e6f7d" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.549811 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.551702 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-nova-metadata-tls-certs\") pod \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.551796 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.551836 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10cd5eda-54cc-4c0a-91ca-4f8217e5220e-operator-scripts\") pod \"10cd5eda-54cc-4c0a-91ca-4f8217e5220e\" (UID: \"10cd5eda-54cc-4c0a-91ca-4f8217e5220e\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.551974 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-config-data\") pod \"6d49cc40-ce20-415f-a979-398430c2bd81\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.552006 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-scripts\") pod \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.552032 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-config-data\") pod \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.552866 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsmmv\" (UniqueName: \"kubernetes.io/projected/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-kube-api-access-rsmmv\") pod \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.552898 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jp2bg\" (UniqueName: \"kubernetes.io/projected/10cd5eda-54cc-4c0a-91ca-4f8217e5220e-kube-api-access-jp2bg\") pod \"10cd5eda-54cc-4c0a-91ca-4f8217e5220e\" (UID: \"10cd5eda-54cc-4c0a-91ca-4f8217e5220e\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.552943 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-combined-ca-bundle\") pod \"6d49cc40-ce20-415f-a979-398430c2bd81\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.552983 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxs72\" (UniqueName: \"kubernetes.io/projected/6d49cc40-ce20-415f-a979-398430c2bd81-kube-api-access-zxs72\") pod \"6d49cc40-ce20-415f-a979-398430c2bd81\" (UID: 
\"6d49cc40-ce20-415f-a979-398430c2bd81\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.553012 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b3dc5e2c-0729-4f4d-8481-bd8fb0064a80-operator-scripts\") pod \"b3dc5e2c-0729-4f4d-8481-bd8fb0064a80\" (UID: \"b3dc5e2c-0729-4f4d-8481-bd8fb0064a80\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.553048 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-combined-ca-bundle\") pod \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.553093 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d49cc40-ce20-415f-a979-398430c2bd81-logs\") pod \"6d49cc40-ce20-415f-a979-398430c2bd81\" (UID: \"6d49cc40-ce20-415f-a979-398430c2bd81\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.553117 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jpqmn\" (UniqueName: \"kubernetes.io/projected/693d1a99-bf33-42ee-adea-2f8ce0f6c002-kube-api-access-jpqmn\") pod \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.553148 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-public-tls-certs\") pod \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.553179 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-config-data\") pod \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.553219 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/693d1a99-bf33-42ee-adea-2f8ce0f6c002-httpd-run\") pod \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.553269 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-logs\") pod \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\" (UID: \"e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.553321 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-combined-ca-bundle\") pod \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.553352 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-config-data-custom\") pod \"6d49cc40-ce20-415f-a979-398430c2bd81\" (UID: 
\"6d49cc40-ce20-415f-a979-398430c2bd81\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.553383 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkmzj\" (UniqueName: \"kubernetes.io/projected/b3dc5e2c-0729-4f4d-8481-bd8fb0064a80-kube-api-access-zkmzj\") pod \"b3dc5e2c-0729-4f4d-8481-bd8fb0064a80\" (UID: \"b3dc5e2c-0729-4f4d-8481-bd8fb0064a80\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.553442 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/693d1a99-bf33-42ee-adea-2f8ce0f6c002-logs\") pod \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\" (UID: \"693d1a99-bf33-42ee-adea-2f8ce0f6c002\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.553589 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10cd5eda-54cc-4c0a-91ca-4f8217e5220e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "10cd5eda-54cc-4c0a-91ca-4f8217e5220e" (UID: "10cd5eda-54cc-4c0a-91ca-4f8217e5220e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.554566 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-logs" (OuterVolumeSpecName: "logs") pod "e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" (UID: "e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.558239 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "693d1a99-bf33-42ee-adea-2f8ce0f6c002" (UID: "693d1a99-bf33-42ee-adea-2f8ce0f6c002"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.558315 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-scripts" (OuterVolumeSpecName: "scripts") pod "693d1a99-bf33-42ee-adea-2f8ce0f6c002" (UID: "693d1a99-bf33-42ee-adea-2f8ce0f6c002"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.558656 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/693d1a99-bf33-42ee-adea-2f8ce0f6c002-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "693d1a99-bf33-42ee-adea-2f8ce0f6c002" (UID: "693d1a99-bf33-42ee-adea-2f8ce0f6c002"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.561209 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-kube-api-access-rsmmv" (OuterVolumeSpecName: "kube-api-access-rsmmv") pod "e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" (UID: "e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e"). InnerVolumeSpecName "kube-api-access-rsmmv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.561519 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/693d1a99-bf33-42ee-adea-2f8ce0f6c002-logs" (OuterVolumeSpecName: "logs") pod "693d1a99-bf33-42ee-adea-2f8ce0f6c002" (UID: "693d1a99-bf33-42ee-adea-2f8ce0f6c002"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.561821 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d49cc40-ce20-415f-a979-398430c2bd81-logs" (OuterVolumeSpecName: "logs") pod "6d49cc40-ce20-415f-a979-398430c2bd81" (UID: "6d49cc40-ce20-415f-a979-398430c2bd81"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.562304 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3dc5e2c-0729-4f4d-8481-bd8fb0064a80-kube-api-access-zkmzj" (OuterVolumeSpecName: "kube-api-access-zkmzj") pod "b3dc5e2c-0729-4f4d-8481-bd8fb0064a80" (UID: "b3dc5e2c-0729-4f4d-8481-bd8fb0064a80"). InnerVolumeSpecName "kube-api-access-zkmzj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.563045 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3dc5e2c-0729-4f4d-8481-bd8fb0064a80-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b3dc5e2c-0729-4f4d-8481-bd8fb0064a80" (UID: "b3dc5e2c-0729-4f4d-8481-bd8fb0064a80"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.563181 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10cd5eda-54cc-4c0a-91ca-4f8217e5220e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.563208 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4bs4\" (UniqueName: \"kubernetes.io/projected/8df4197d-046b-4b35-a14a-b382bda46242-kube-api-access-w4bs4\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.565497 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d49cc40-ce20-415f-a979-398430c2bd81-kube-api-access-zxs72" (OuterVolumeSpecName: "kube-api-access-zxs72") pod "6d49cc40-ce20-415f-a979-398430c2bd81" (UID: "6d49cc40-ce20-415f-a979-398430c2bd81"). InnerVolumeSpecName "kube-api-access-zxs72". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.565984 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/693d1a99-bf33-42ee-adea-2f8ce0f6c002-kube-api-access-jpqmn" (OuterVolumeSpecName: "kube-api-access-jpqmn") pod "693d1a99-bf33-42ee-adea-2f8ce0f6c002" (UID: "693d1a99-bf33-42ee-adea-2f8ce0f6c002"). InnerVolumeSpecName "kube-api-access-jpqmn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.567153 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10cd5eda-54cc-4c0a-91ca-4f8217e5220e-kube-api-access-jp2bg" (OuterVolumeSpecName: "kube-api-access-jp2bg") pod "10cd5eda-54cc-4c0a-91ca-4f8217e5220e" (UID: "10cd5eda-54cc-4c0a-91ca-4f8217e5220e"). InnerVolumeSpecName "kube-api-access-jp2bg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.574418 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6d49cc40-ce20-415f-a979-398430c2bd81" (UID: "6d49cc40-ce20-415f-a979-398430c2bd81"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.610504 5037 scope.go:117] "RemoveContainer" containerID="ac124cedaed73284f9bbf36277718f97b7c752f739fd563062cca9f2857a6274" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.617482 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" (UID: "e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.618851 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "693d1a99-bf33-42ee-adea-2f8ce0f6c002" (UID: "693d1a99-bf33-42ee-adea-2f8ce0f6c002"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.619402 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-config-data" (OuterVolumeSpecName: "config-data") pod "e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" (UID: "e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.639928 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d49cc40-ce20-415f-a979-398430c2bd81" (UID: "6d49cc40-ce20-415f-a979-398430c2bd81"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.652966 5037 scope.go:117] "RemoveContainer" containerID="4f57619ebc65ee19c82e274478cdb8f19dd8e02a6b90642fbf2271294bdfb236" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.659592 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" (UID: "e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.664157 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-public-tls-certs\") pod \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.666902 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-combined-ca-bundle\") pod \"cff988a9-69e2-42cc-a456-426f13be8a58\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.666966 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-internal-tls-certs\") pod \"cff988a9-69e2-42cc-a456-426f13be8a58\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.667007 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-scripts\") pod \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.667028 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-public-tls-certs\") pod \"cff988a9-69e2-42cc-a456-426f13be8a58\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.667059 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-config-data-custom\") pod \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.667092 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-config-data\") pod \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.667144 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07720f90-b6f7-4b81-9c32-17f1e72b19fa-logs\") pod \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.667210 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07720f90-b6f7-4b81-9c32-17f1e72b19fa-etc-machine-id\") pod \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.667253 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-combined-ca-bundle\") pod \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\" (UID: 
\"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.667275 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-internal-tls-certs\") pod \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.667369 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqjj6\" (UniqueName: \"kubernetes.io/projected/07720f90-b6f7-4b81-9c32-17f1e72b19fa-kube-api-access-qqjj6\") pod \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\" (UID: \"07720f90-b6f7-4b81-9c32-17f1e72b19fa\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.667397 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cff988a9-69e2-42cc-a456-426f13be8a58-logs\") pod \"cff988a9-69e2-42cc-a456-426f13be8a58\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.667445 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-config-data\") pod \"cff988a9-69e2-42cc-a456-426f13be8a58\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.667474 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kslw4\" (UniqueName: \"kubernetes.io/projected/cff988a9-69e2-42cc-a456-426f13be8a58-kube-api-access-kslw4\") pod \"cff988a9-69e2-42cc-a456-426f13be8a58\" (UID: \"cff988a9-69e2-42cc-a456-426f13be8a58\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668101 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668120 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668132 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsmmv\" (UniqueName: \"kubernetes.io/projected/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-kube-api-access-rsmmv\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668144 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jp2bg\" (UniqueName: \"kubernetes.io/projected/10cd5eda-54cc-4c0a-91ca-4f8217e5220e-kube-api-access-jp2bg\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668157 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668167 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxs72\" (UniqueName: \"kubernetes.io/projected/6d49cc40-ce20-415f-a979-398430c2bd81-kube-api-access-zxs72\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668178 5037 reconciler_common.go:293] "Volume 
detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b3dc5e2c-0729-4f4d-8481-bd8fb0064a80-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668188 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668198 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d49cc40-ce20-415f-a979-398430c2bd81-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668210 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jpqmn\" (UniqueName: \"kubernetes.io/projected/693d1a99-bf33-42ee-adea-2f8ce0f6c002-kube-api-access-jpqmn\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668222 5037 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/693d1a99-bf33-42ee-adea-2f8ce0f6c002-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668232 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668241 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668251 5037 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668262 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkmzj\" (UniqueName: \"kubernetes.io/projected/b3dc5e2c-0729-4f4d-8481-bd8fb0064a80-kube-api-access-zkmzj\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668272 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/693d1a99-bf33-42ee-adea-2f8ce0f6c002-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668281 5037 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.668321 5037 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.669097 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07720f90-b6f7-4b81-9c32-17f1e72b19fa-logs" (OuterVolumeSpecName: "logs") pod "07720f90-b6f7-4b81-9c32-17f1e72b19fa" (UID: "07720f90-b6f7-4b81-9c32-17f1e72b19fa"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.669797 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-config-data" (OuterVolumeSpecName: "config-data") pod "6d49cc40-ce20-415f-a979-398430c2bd81" (UID: "6d49cc40-ce20-415f-a979-398430c2bd81"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.670340 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cff988a9-69e2-42cc-a456-426f13be8a58-logs" (OuterVolumeSpecName: "logs") pod "cff988a9-69e2-42cc-a456-426f13be8a58" (UID: "cff988a9-69e2-42cc-a456-426f13be8a58"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.673139 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07720f90-b6f7-4b81-9c32-17f1e72b19fa-kube-api-access-qqjj6" (OuterVolumeSpecName: "kube-api-access-qqjj6") pod "07720f90-b6f7-4b81-9c32-17f1e72b19fa" (UID: "07720f90-b6f7-4b81-9c32-17f1e72b19fa"). InnerVolumeSpecName "kube-api-access-qqjj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.676897 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/07720f90-b6f7-4b81-9c32-17f1e72b19fa-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "07720f90-b6f7-4b81-9c32-17f1e72b19fa" (UID: "07720f90-b6f7-4b81-9c32-17f1e72b19fa"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.683079 5037 scope.go:117] "RemoveContainer" containerID="4f3ea9d9853eb70966721e3e3e7a15223cf43f7532f45c09e3855990aac57118" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.683598 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-scripts" (OuterVolumeSpecName: "scripts") pod "07720f90-b6f7-4b81-9c32-17f1e72b19fa" (UID: "07720f90-b6f7-4b81-9c32-17f1e72b19fa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.697539 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cff988a9-69e2-42cc-a456-426f13be8a58-kube-api-access-kslw4" (OuterVolumeSpecName: "kube-api-access-kslw4") pod "cff988a9-69e2-42cc-a456-426f13be8a58" (UID: "cff988a9-69e2-42cc-a456-426f13be8a58"). InnerVolumeSpecName "kube-api-access-kslw4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.715767 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "07720f90-b6f7-4b81-9c32-17f1e72b19fa" (UID: "07720f90-b6f7-4b81-9c32-17f1e72b19fa"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.723232 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance4d25-account-delete-pftxq" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.732532 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "07720f90-b6f7-4b81-9c32-17f1e72b19fa" (UID: "07720f90-b6f7-4b81-9c32-17f1e72b19fa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.737929 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-config-data" (OuterVolumeSpecName: "config-data") pod "693d1a99-bf33-42ee-adea-2f8ce0f6c002" (UID: "693d1a99-bf33-42ee-adea-2f8ce0f6c002"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.745301 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "693d1a99-bf33-42ee-adea-2f8ce0f6c002" (UID: "693d1a99-bf33-42ee-adea-2f8ce0f6c002"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.757103 5037 scope.go:117] "RemoveContainer" containerID="2f6e30bd74ea66c491e2959c075dfac83aa041657baf37104388b43c5d325007" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.770145 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.770170 5037 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.770180 5037 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.770189 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/693d1a99-bf33-42ee-adea-2f8ce0f6c002-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.770197 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07720f90-b6f7-4b81-9c32-17f1e72b19fa-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.770204 5037 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07720f90-b6f7-4b81-9c32-17f1e72b19fa-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.770212 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.770220 5037 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-qqjj6\" (UniqueName: \"kubernetes.io/projected/07720f90-b6f7-4b81-9c32-17f1e72b19fa-kube-api-access-qqjj6\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.770228 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cff988a9-69e2-42cc-a456-426f13be8a58-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.770236 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kslw4\" (UniqueName: \"kubernetes.io/projected/cff988a9-69e2-42cc-a456-426f13be8a58-kube-api-access-kslw4\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.770244 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d49cc40-ce20-415f-a979-398430c2bd81-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.777471 5037 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.781708 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "07720f90-b6f7-4b81-9c32-17f1e72b19fa" (UID: "07720f90-b6f7-4b81-9c32-17f1e72b19fa"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.786654 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "07720f90-b6f7-4b81-9c32-17f1e72b19fa" (UID: "07720f90-b6f7-4b81-9c32-17f1e72b19fa"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.792893 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-config-data" (OuterVolumeSpecName: "config-data") pod "07720f90-b6f7-4b81-9c32-17f1e72b19fa" (UID: "07720f90-b6f7-4b81-9c32-17f1e72b19fa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.817409 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cff988a9-69e2-42cc-a456-426f13be8a58" (UID: "cff988a9-69e2-42cc-a456-426f13be8a58"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.820221 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cff988a9-69e2-42cc-a456-426f13be8a58" (UID: "cff988a9-69e2-42cc-a456-426f13be8a58"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.822811 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-config-data" (OuterVolumeSpecName: "config-data") pod "cff988a9-69e2-42cc-a456-426f13be8a58" (UID: "cff988a9-69e2-42cc-a456-426f13be8a58"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.843712 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "cff988a9-69e2-42cc-a456-426f13be8a58" (UID: "cff988a9-69e2-42cc-a456-426f13be8a58"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.854975 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0e6d6-account-delete-j5w7q" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.871073 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mjdn6\" (UniqueName: \"kubernetes.io/projected/5b53df32-369f-4a91-bb97-5da067cc3c6a-kube-api-access-mjdn6\") pod \"5b53df32-369f-4a91-bb97-5da067cc3c6a\" (UID: \"5b53df32-369f-4a91-bb97-5da067cc3c6a\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.871137 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b53df32-369f-4a91-bb97-5da067cc3c6a-operator-scripts\") pod \"5b53df32-369f-4a91-bb97-5da067cc3c6a\" (UID: \"5b53df32-369f-4a91-bb97-5da067cc3c6a\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.871560 5037 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.871573 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.871582 5037 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.871589 5037 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.871598 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.871605 5037 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/07720f90-b6f7-4b81-9c32-17f1e72b19fa-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.871613 5037 
reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cff988a9-69e2-42cc-a456-426f13be8a58-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.871620 5037 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: E1126 14:44:09.872618 5037 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 26 14:44:09 crc kubenswrapper[5037]: E1126 14:44:09.872683 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data podName:ba78b94a-32d0-4377-ac41-ffd036b241bf nodeName:}" failed. No retries permitted until 2025-11-26 14:44:17.872666175 +0000 UTC m=+1724.669436359 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data") pod "rabbitmq-server-0" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf") : configmap "rabbitmq-config-data" not found Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.875933 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b53df32-369f-4a91-bb97-5da067cc3c6a-kube-api-access-mjdn6" (OuterVolumeSpecName: "kube-api-access-mjdn6") pod "5b53df32-369f-4a91-bb97-5da067cc3c6a" (UID: "5b53df32-369f-4a91-bb97-5da067cc3c6a"). InnerVolumeSpecName "kube-api-access-mjdn6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.876355 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b53df32-369f-4a91-bb97-5da067cc3c6a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5b53df32-369f-4a91-bb97-5da067cc3c6a" (UID: "5b53df32-369f-4a91-bb97-5da067cc3c6a"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.906453 5037 scope.go:117] "RemoveContainer" containerID="b1db0ccf747c065689c039923c194e8419b6cd5a8c76ec974b99511a7ede0d79" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.908522 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:44:09 crc kubenswrapper[5037]: E1126 14:44:09.909120 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.925766 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03ffa609-b428-4a0e-8ec1-5c205391cf7b" path="/var/lib/kubelet/pods/03ffa609-b428-4a0e-8ec1-5c205391cf7b/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.926516 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" path="/var/lib/kubelet/pods/2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.927624 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="300dce8f-4337-4707-8075-f32b93f03e4f" path="/var/lib/kubelet/pods/300dce8f-4337-4707-8075-f32b93f03e4f/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.929941 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="334f3bb7-793e-4cff-b0ef-de24dc8a46b5" path="/var/lib/kubelet/pods/334f3bb7-793e-4cff-b0ef-de24dc8a46b5/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.930545 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4408c030-a5ac-49ae-9361-54cbe3c27108" path="/var/lib/kubelet/pods/4408c030-a5ac-49ae-9361-54cbe3c27108/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.931066 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" path="/var/lib/kubelet/pods/6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.932484 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86867a49-37d5-4289-a31e-8eed1257c87a" path="/var/lib/kubelet/pods/86867a49-37d5-4289-a31e-8eed1257c87a/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.936683 5037 scope.go:117] "RemoveContainer" containerID="2f6e30bd74ea66c491e2959c075dfac83aa041657baf37104388b43c5d325007" Nov 26 14:44:09 crc kubenswrapper[5037]: E1126 14:44:09.937059 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f6e30bd74ea66c491e2959c075dfac83aa041657baf37104388b43c5d325007\": container with ID starting with 2f6e30bd74ea66c491e2959c075dfac83aa041657baf37104388b43c5d325007 not found: ID does not exist" containerID="2f6e30bd74ea66c491e2959c075dfac83aa041657baf37104388b43c5d325007" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.937092 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f6e30bd74ea66c491e2959c075dfac83aa041657baf37104388b43c5d325007"} err="failed to get 
container status \"2f6e30bd74ea66c491e2959c075dfac83aa041657baf37104388b43c5d325007\": rpc error: code = NotFound desc = could not find container \"2f6e30bd74ea66c491e2959c075dfac83aa041657baf37104388b43c5d325007\": container with ID starting with 2f6e30bd74ea66c491e2959c075dfac83aa041657baf37104388b43c5d325007 not found: ID does not exist" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.937116 5037 scope.go:117] "RemoveContainer" containerID="b1db0ccf747c065689c039923c194e8419b6cd5a8c76ec974b99511a7ede0d79" Nov 26 14:44:09 crc kubenswrapper[5037]: E1126 14:44:09.937911 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1db0ccf747c065689c039923c194e8419b6cd5a8c76ec974b99511a7ede0d79\": container with ID starting with b1db0ccf747c065689c039923c194e8419b6cd5a8c76ec974b99511a7ede0d79 not found: ID does not exist" containerID="b1db0ccf747c065689c039923c194e8419b6cd5a8c76ec974b99511a7ede0d79" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.937956 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1db0ccf747c065689c039923c194e8419b6cd5a8c76ec974b99511a7ede0d79"} err="failed to get container status \"b1db0ccf747c065689c039923c194e8419b6cd5a8c76ec974b99511a7ede0d79\": rpc error: code = NotFound desc = could not find container \"b1db0ccf747c065689c039923c194e8419b6cd5a8c76ec974b99511a7ede0d79\": container with ID starting with b1db0ccf747c065689c039923c194e8419b6cd5a8c76ec974b99511a7ede0d79 not found: ID does not exist" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.937982 5037 scope.go:117] "RemoveContainer" containerID="ac800c71f24567e467410fb7c333d7691707d518b6f0f84492f89244a18f9205" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.963731 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c3c49ff-cf53-4b5b-ba83-10877d499763" path="/var/lib/kubelet/pods/9c3c49ff-cf53-4b5b-ba83-10877d499763/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.964259 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7ece585-54a5-40d4-866f-98c968f03910" path="/var/lib/kubelet/pods/a7ece585-54a5-40d4-866f-98c968f03910/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.973147 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvbqc\" (UniqueName: \"kubernetes.io/projected/d4fd340f-f656-4ec3-aba1-a33eaa58aed0-kube-api-access-bvbqc\") pod \"d4fd340f-f656-4ec3-aba1-a33eaa58aed0\" (UID: \"d4fd340f-f656-4ec3-aba1-a33eaa58aed0\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.973310 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4fd340f-f656-4ec3-aba1-a33eaa58aed0-operator-scripts\") pod \"d4fd340f-f656-4ec3-aba1-a33eaa58aed0\" (UID: \"d4fd340f-f656-4ec3-aba1-a33eaa58aed0\") " Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.974261 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4fd340f-f656-4ec3-aba1-a33eaa58aed0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d4fd340f-f656-4ec3-aba1-a33eaa58aed0" (UID: "d4fd340f-f656-4ec3-aba1-a33eaa58aed0"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.975414 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4fd340f-f656-4ec3-aba1-a33eaa58aed0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.975434 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b53df32-369f-4a91-bb97-5da067cc3c6a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.975443 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mjdn6\" (UniqueName: \"kubernetes.io/projected/5b53df32-369f-4a91-bb97-5da067cc3c6a-kube-api-access-mjdn6\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:09 crc kubenswrapper[5037]: E1126 14:44:09.975463 5037 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 14:44:09 crc kubenswrapper[5037]: E1126 14:44:09.975567 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data podName:7f05291f-1331-411b-9971-c71218d11a35 nodeName:}" failed. No retries permitted until 2025-11-26 14:44:17.975542377 +0000 UTC m=+1724.772312561 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data") pod "rabbitmq-cell1-server-0" (UID: "7f05291f-1331-411b-9971-c71218d11a35") : configmap "rabbitmq-cell1-config-data" not found Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.977566 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4fd340f-f656-4ec3-aba1-a33eaa58aed0-kube-api-access-bvbqc" (OuterVolumeSpecName: "kube-api-access-bvbqc") pod "d4fd340f-f656-4ec3-aba1-a33eaa58aed0" (UID: "d4fd340f-f656-4ec3-aba1-a33eaa58aed0"). InnerVolumeSpecName "kube-api-access-bvbqc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.983183 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab0d1aa1-4e89-4fa1-a06d-a199ed98670f" path="/var/lib/kubelet/pods/ab0d1aa1-4e89-4fa1-a06d-a199ed98670f/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.984148 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aed636f4-272c-4379-a6f3-8247ae0e46cc" path="/var/lib/kubelet/pods/aed636f4-272c-4379-a6f3-8247ae0e46cc/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.984858 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bdd4849b-e92e-473d-88d0-74c060c04eb7" path="/var/lib/kubelet/pods/bdd4849b-e92e-473d-88d0-74c060c04eb7/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.985898 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2d75a18-6446-4558-af57-c6e0c957fc3b" path="/var/lib/kubelet/pods/c2d75a18-6446-4558-af57-c6e0c957fc3b/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.986592 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd47ce65-1426-47e2-a5d1-6efd83bac3ab" path="/var/lib/kubelet/pods/dd47ce65-1426-47e2-a5d1-6efd83bac3ab/volumes" Nov 26 14:44:09 crc kubenswrapper[5037]: I1126 14:44:09.987115 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd171888-b656-4511-af7d-cdff1058bf5f" path="/var/lib/kubelet/pods/fd171888-b656-4511-af7d-cdff1058bf5f/volumes" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.006449 5037 scope.go:117] "RemoveContainer" containerID="b153954d737c10034799ce4a540d151fbb5420d7fc983a9615870845e76fb0be" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.076772 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvbqc\" (UniqueName: \"kubernetes.io/projected/d4fd340f-f656-4ec3-aba1-a33eaa58aed0-kube-api-access-bvbqc\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.294553 5037 scope.go:117] "RemoveContainer" containerID="b5c06e36fe4f8a14ce6fd85185880cc6b1029af8d4de11a5e266f008fdc3b833" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.348990 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cff988a9-69e2-42cc-a456-426f13be8a58","Type":"ContainerDied","Data":"4ea9727ed78454115502b78b044845297e0a6de7ba85ead9415a911f8fe74e90"} Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.349062 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.355690 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance4d25-account-delete-pftxq" event={"ID":"5b53df32-369f-4a91-bb97-5da067cc3c6a","Type":"ContainerDied","Data":"f03e59690ac79242d781f2c6ec08d1f34665025a0bc60a8ed94b3b2272e3ab97"} Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.355769 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance4d25-account-delete-pftxq" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.359069 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0e6d6-account-delete-j5w7q" event={"ID":"d4fd340f-f656-4ec3-aba1-a33eaa58aed0","Type":"ContainerDied","Data":"86e6ff4d67702c5fe009cee75f287a92fccea94f97a8f860d2119eb000575ab6"} Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.359128 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0e6d6-account-delete-j5w7q" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.383098 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_ec26620a-6ad8-4792-bb25-543dc31d3be5/ovn-northd/0.log" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.383152 5037 generic.go:334] "Generic (PLEG): container finished" podID="ec26620a-6ad8-4792-bb25-543dc31d3be5" containerID="d3158b8703e1c139eecff816090fc54bf7b1598ce59a6a91d56a6bde613e9529" exitCode=139 Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.383238 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ec26620a-6ad8-4792-bb25-543dc31d3be5","Type":"ContainerDied","Data":"d3158b8703e1c139eecff816090fc54bf7b1598ce59a6a91d56a6bde613e9529"} Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.388385 5037 generic.go:334] "Generic (PLEG): container finished" podID="fe13f626-50c7-4ec3-b967-20f038731571" containerID="ca5593d895153686d827f3a444c0ce51200735ce910a9e9d65ec173d66664c8b" exitCode=0 Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.388452 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-fb548d49-hf8zh" event={"ID":"fe13f626-50c7-4ec3-b967-20f038731571","Type":"ContainerDied","Data":"ca5593d895153686d827f3a444c0ce51200735ce910a9e9d65ec173d66664c8b"} Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.390225 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novaapieb2b-account-delete-988tl" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.392084 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.392874 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinderc68b-account-delete-rphsq" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.393506 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.393741 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron3cec-account-delete-42tll" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.394415 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7c767587b5-nzlv9" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.394597 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.400500 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbicanf9d3-account-delete-qgkj4" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.411286 5037 scope.go:117] "RemoveContainer" containerID="b153954d737c10034799ce4a540d151fbb5420d7fc983a9615870845e76fb0be" Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.411451 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.411963 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b153954d737c10034799ce4a540d151fbb5420d7fc983a9615870845e76fb0be\": container with ID starting with b153954d737c10034799ce4a540d151fbb5420d7fc983a9615870845e76fb0be not found: ID does not exist" containerID="b153954d737c10034799ce4a540d151fbb5420d7fc983a9615870845e76fb0be" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.411995 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b153954d737c10034799ce4a540d151fbb5420d7fc983a9615870845e76fb0be"} err="failed to get container status \"b153954d737c10034799ce4a540d151fbb5420d7fc983a9615870845e76fb0be\": rpc error: code = NotFound desc = could not find container \"b153954d737c10034799ce4a540d151fbb5420d7fc983a9615870845e76fb0be\": container with ID starting with b153954d737c10034799ce4a540d151fbb5420d7fc983a9615870845e76fb0be not found: ID does not exist" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.412019 5037 scope.go:117] "RemoveContainer" containerID="b5c06e36fe4f8a14ce6fd85185880cc6b1029af8d4de11a5e266f008fdc3b833" Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.412993 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5c06e36fe4f8a14ce6fd85185880cc6b1029af8d4de11a5e266f008fdc3b833\": container with ID starting with b5c06e36fe4f8a14ce6fd85185880cc6b1029af8d4de11a5e266f008fdc3b833 not found: ID does not exist" containerID="b5c06e36fe4f8a14ce6fd85185880cc6b1029af8d4de11a5e266f008fdc3b833" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.413026 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5c06e36fe4f8a14ce6fd85185880cc6b1029af8d4de11a5e266f008fdc3b833"} err="failed to get container status \"b5c06e36fe4f8a14ce6fd85185880cc6b1029af8d4de11a5e266f008fdc3b833\": rpc error: code = NotFound desc = could not find container \"b5c06e36fe4f8a14ce6fd85185880cc6b1029af8d4de11a5e266f008fdc3b833\": container with ID starting with b5c06e36fe4f8a14ce6fd85185880cc6b1029af8d4de11a5e266f008fdc3b833 not found: ID does not exist" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.413048 5037 scope.go:117] "RemoveContainer" containerID="cb5de0febf4f6869c6113a77abea3425966e60873437776dd7f265ea84cd9709" Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.416194 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 14:44:10 crc kubenswrapper[5037]: 
E1126 14:44:10.418534 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.418633 5037 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="8707a232-f648-4795-b250-d29069f26514" containerName="nova-cell0-conductor-conductor" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.447779 5037 scope.go:117] "RemoveContainer" containerID="852040b491cc42295268755c4a7220816c3a15eb3bae51127b18b8351d773e4d" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.473826 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.482442 5037 scope.go:117] "RemoveContainer" containerID="79410aa1ae42d0ccbb1f24403b971fbdf2b68b70ddace0cddb3256c93023bbb4" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.496678 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.574025 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-7c767587b5-nzlv9"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.586131 5037 scope.go:117] "RemoveContainer" containerID="3dcbf6703b46c5a8172c41249335b346fb201a3ba3a13f8712f5740aef9fb594" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.607816 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-7c767587b5-nzlv9"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.619901 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinderc68b-account-delete-rphsq"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.628577 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinderc68b-account-delete-rphsq"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.640170 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance4d25-account-delete-pftxq"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.647091 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance4d25-account-delete-pftxq"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.652903 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapieb2b-account-delete-988tl"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.653666 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.658400 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novaapieb2b-account-delete-988tl"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.663604 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0e6d6-account-delete-j5w7q"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.675436 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell0e6d6-account-delete-j5w7q"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.681039 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.688957 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.695646 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.702061 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.712018 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron3cec-account-delete-42tll"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.713442 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron3cec-account-delete-42tll"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.721513 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbicanf9d3-account-delete-qgkj4"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.730493 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbicanf9d3-account-delete-qgkj4"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.737666 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.744963 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.786824 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="ba78b94a-32d0-4377-ac41-ffd036b241bf" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.100:5671: connect: connection refused" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.791819 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-combined-ca-bundle\") pod \"fe13f626-50c7-4ec3-b967-20f038731571\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.791932 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-public-tls-certs\") pod \"fe13f626-50c7-4ec3-b967-20f038731571\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.792005 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-internal-tls-certs\") pod \"fe13f626-50c7-4ec3-b967-20f038731571\" (UID: 
\"fe13f626-50c7-4ec3-b967-20f038731571\") " Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.792061 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbtq5\" (UniqueName: \"kubernetes.io/projected/fe13f626-50c7-4ec3-b967-20f038731571-kube-api-access-bbtq5\") pod \"fe13f626-50c7-4ec3-b967-20f038731571\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.792098 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-credential-keys\") pod \"fe13f626-50c7-4ec3-b967-20f038731571\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.792185 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-scripts\") pod \"fe13f626-50c7-4ec3-b967-20f038731571\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.792946 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-fernet-keys\") pod \"fe13f626-50c7-4ec3-b967-20f038731571\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.792986 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-config-data\") pod \"fe13f626-50c7-4ec3-b967-20f038731571\" (UID: \"fe13f626-50c7-4ec3-b967-20f038731571\") " Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.798155 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.798371 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe13f626-50c7-4ec3-b967-20f038731571-kube-api-access-bbtq5" (OuterVolumeSpecName: "kube-api-access-bbtq5") pod "fe13f626-50c7-4ec3-b967-20f038731571" (UID: "fe13f626-50c7-4ec3-b967-20f038731571"). InnerVolumeSpecName "kube-api-access-bbtq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.798618 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "fe13f626-50c7-4ec3-b967-20f038731571" (UID: "fe13f626-50c7-4ec3-b967-20f038731571"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.798649 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.798965 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.800940 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "fe13f626-50c7-4ec3-b967-20f038731571" (UID: "fe13f626-50c7-4ec3-b967-20f038731571"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.801004 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.801010 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-scripts" (OuterVolumeSpecName: "scripts") pod "fe13f626-50c7-4ec3-b967-20f038731571" (UID: "fe13f626-50c7-4ec3-b967-20f038731571"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.801053 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.801079 5037 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovsdb-server" Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.806869 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.806916 5037 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovs-vswitchd" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.822378 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-config-data" (OuterVolumeSpecName: "config-data") pod "fe13f626-50c7-4ec3-b967-20f038731571" (UID: "fe13f626-50c7-4ec3-b967-20f038731571"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.823365 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_ec26620a-6ad8-4792-bb25-543dc31d3be5/ovn-northd/0.log" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.823432 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.834880 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fe13f626-50c7-4ec3-b967-20f038731571" (UID: "fe13f626-50c7-4ec3-b967-20f038731571"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.844543 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "fe13f626-50c7-4ec3-b967-20f038731571" (UID: "fe13f626-50c7-4ec3-b967-20f038731571"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.860499 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "fe13f626-50c7-4ec3-b967-20f038731571" (UID: "fe13f626-50c7-4ec3-b967-20f038731571"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.895685 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.895721 5037 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.895733 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.895748 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.895761 5037 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.895772 5037 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.895783 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbtq5\" (UniqueName: \"kubernetes.io/projected/fe13f626-50c7-4ec3-b967-20f038731571-kube-api-access-bbtq5\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.895796 5037 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fe13f626-50c7-4ec3-b967-20f038731571-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.895887 5037 secret.go:188] Couldn't get secret openstack/cinder-scripts: secret "cinder-scripts" not found Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.895938 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:18.895921509 +0000 UTC m=+1725.692691693 (durationBeforeRetry 8s). 
Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.896583 5037 secret.go:188] Couldn't get secret openstack/cinder-scheduler-config-data: secret "cinder-scheduler-config-data" not found
Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.896621 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:18.896611285 +0000 UTC m=+1725.693381469 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data-custom" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-scheduler-config-data" not found
Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.896664 5037 secret.go:188] Couldn't get secret openstack/cinder-config-data: secret "cinder-config-data" not found
Nov 26 14:44:10 crc kubenswrapper[5037]: E1126 14:44:10.896691 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:18.896683147 +0000 UTC m=+1725.693453331 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-config-data" not found
Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.998619 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-ovn-northd-tls-certs\") pod \"ec26620a-6ad8-4792-bb25-543dc31d3be5\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") "
Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.999031 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbmnh\" (UniqueName: \"kubernetes.io/projected/ec26620a-6ad8-4792-bb25-543dc31d3be5-kube-api-access-dbmnh\") pod \"ec26620a-6ad8-4792-bb25-543dc31d3be5\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") "
Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.999075 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-combined-ca-bundle\") pod \"ec26620a-6ad8-4792-bb25-543dc31d3be5\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") "
Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.999201 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-metrics-certs-tls-certs\") pod \"ec26620a-6ad8-4792-bb25-543dc31d3be5\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") "
Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.999326 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ec26620a-6ad8-4792-bb25-543dc31d3be5-ovn-rundir\") pod \"ec26620a-6ad8-4792-bb25-543dc31d3be5\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") "
Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.999363 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec26620a-6ad8-4792-bb25-543dc31d3be5-config\") pod \"ec26620a-6ad8-4792-bb25-543dc31d3be5\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") "
Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.999397 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec26620a-6ad8-4792-bb25-543dc31d3be5-scripts\") pod \"ec26620a-6ad8-4792-bb25-543dc31d3be5\" (UID: \"ec26620a-6ad8-4792-bb25-543dc31d3be5\") "
Nov 26 14:44:10 crc kubenswrapper[5037]: I1126 14:44:10.999628 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec26620a-6ad8-4792-bb25-543dc31d3be5-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "ec26620a-6ad8-4792-bb25-543dc31d3be5" (UID: "ec26620a-6ad8-4792-bb25-543dc31d3be5"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.000173 5037 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ec26620a-6ad8-4792-bb25-543dc31d3be5-ovn-rundir\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.001865 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec26620a-6ad8-4792-bb25-543dc31d3be5-config" (OuterVolumeSpecName: "config") pod "ec26620a-6ad8-4792-bb25-543dc31d3be5" (UID: "ec26620a-6ad8-4792-bb25-543dc31d3be5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.001904 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec26620a-6ad8-4792-bb25-543dc31d3be5-scripts" (OuterVolumeSpecName: "scripts") pod "ec26620a-6ad8-4792-bb25-543dc31d3be5" (UID: "ec26620a-6ad8-4792-bb25-543dc31d3be5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.016791 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec26620a-6ad8-4792-bb25-543dc31d3be5-kube-api-access-dbmnh" (OuterVolumeSpecName: "kube-api-access-dbmnh") pod "ec26620a-6ad8-4792-bb25-543dc31d3be5" (UID: "ec26620a-6ad8-4792-bb25-543dc31d3be5"). InnerVolumeSpecName "kube-api-access-dbmnh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.023107 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ec26620a-6ad8-4792-bb25-543dc31d3be5" (UID: "ec26620a-6ad8-4792-bb25-543dc31d3be5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.073742 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "ec26620a-6ad8-4792-bb25-543dc31d3be5" (UID: "ec26620a-6ad8-4792-bb25-543dc31d3be5"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.075546 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "ec26620a-6ad8-4792-bb25-543dc31d3be5" (UID: "ec26620a-6ad8-4792-bb25-543dc31d3be5"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.101859 5037 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.101894 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbmnh\" (UniqueName: \"kubernetes.io/projected/ec26620a-6ad8-4792-bb25-543dc31d3be5-kube-api-access-dbmnh\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.101909 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.101920 5037 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec26620a-6ad8-4792-bb25-543dc31d3be5-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.101934 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec26620a-6ad8-4792-bb25-543dc31d3be5-config\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.101946 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec26620a-6ad8-4792-bb25-543dc31d3be5-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.407523 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_ec26620a-6ad8-4792-bb25-543dc31d3be5/ovn-northd/0.log"
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.407610 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ec26620a-6ad8-4792-bb25-543dc31d3be5","Type":"ContainerDied","Data":"42dcb3ac7ab34f187ccdcfaf4bb9e02e3e8ae4d1454ba4529ee3898b2b5d4db3"}
Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.407639 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.407649 5037 scope.go:117] "RemoveContainer" containerID="ea28ba554ccf3be563e142ef9810c318f1a7398137617c44deec729fa9ddf87d" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.411427 5037 generic.go:334] "Generic (PLEG): container finished" podID="7f05291f-1331-411b-9971-c71218d11a35" containerID="4483535c43e875eaf8b876f0ce67748ccef6e9a8c9dba169d9e8c3b8043014ae" exitCode=0 Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.411504 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"7f05291f-1331-411b-9971-c71218d11a35","Type":"ContainerDied","Data":"4483535c43e875eaf8b876f0ce67748ccef6e9a8c9dba169d9e8c3b8043014ae"} Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.412831 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-fb548d49-hf8zh" event={"ID":"fe13f626-50c7-4ec3-b967-20f038731571","Type":"ContainerDied","Data":"b56f68978e382944afd896d3bf1e868b543a2597c945b02949a81cc88ef83c5a"} Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.412908 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-fb548d49-hf8zh" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.486579 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-7f55999cfc-jx9r6" podUID="aed636f4-272c-4379-a6f3-8247ae0e46cc" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.165:8080/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.486703 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-7f55999cfc-jx9r6" podUID="aed636f4-272c-4379-a6f3-8247ae0e46cc" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.165:8080/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.503485 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="7f05291f-1331-411b-9971-c71218d11a35" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.578176 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-fb548d49-hf8zh"] Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.590864 5037 scope.go:117] "RemoveContainer" containerID="d3158b8703e1c139eecff816090fc54bf7b1598ce59a6a91d56a6bde613e9529" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.602219 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-fb548d49-hf8zh"] Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.609227 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.615530 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.636586 5037 scope.go:117] "RemoveContainer" containerID="ca5593d895153686d827f3a444c0ce51200735ce910a9e9d65ec173d66664c8b" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.866796 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.890301 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.927551 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07720f90-b6f7-4b81-9c32-17f1e72b19fa" path="/var/lib/kubelet/pods/07720f90-b6f7-4b81-9c32-17f1e72b19fa/volumes" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.928255 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10cd5eda-54cc-4c0a-91ca-4f8217e5220e" path="/var/lib/kubelet/pods/10cd5eda-54cc-4c0a-91ca-4f8217e5220e/volumes" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.928714 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b53df32-369f-4a91-bb97-5da067cc3c6a" path="/var/lib/kubelet/pods/5b53df32-369f-4a91-bb97-5da067cc3c6a/volumes" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.929949 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="693d1a99-bf33-42ee-adea-2f8ce0f6c002" path="/var/lib/kubelet/pods/693d1a99-bf33-42ee-adea-2f8ce0f6c002/volumes" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.930701 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d49cc40-ce20-415f-a979-398430c2bd81" path="/var/lib/kubelet/pods/6d49cc40-ce20-415f-a979-398430c2bd81/volumes" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.931268 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8df4197d-046b-4b35-a14a-b382bda46242" path="/var/lib/kubelet/pods/8df4197d-046b-4b35-a14a-b382bda46242/volumes" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.932225 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0797697-2b6d-4684-9fe1-e17a91f80369" path="/var/lib/kubelet/pods/b0797697-2b6d-4684-9fe1-e17a91f80369/volumes" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.932885 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3dc5e2c-0729-4f4d-8481-bd8fb0064a80" path="/var/lib/kubelet/pods/b3dc5e2c-0729-4f4d-8481-bd8fb0064a80/volumes" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.933501 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cff988a9-69e2-42cc-a456-426f13be8a58" path="/var/lib/kubelet/pods/cff988a9-69e2-42cc-a456-426f13be8a58/volumes" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.934854 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4fd340f-f656-4ec3-aba1-a33eaa58aed0" path="/var/lib/kubelet/pods/d4fd340f-f656-4ec3-aba1-a33eaa58aed0/volumes" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.935600 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" path="/var/lib/kubelet/pods/e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e/volumes" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.936246 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec26620a-6ad8-4792-bb25-543dc31d3be5" path="/var/lib/kubelet/pods/ec26620a-6ad8-4792-bb25-543dc31d3be5/volumes" Nov 26 14:44:11 crc kubenswrapper[5037]: I1126 14:44:11.937222 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe13f626-50c7-4ec3-b967-20f038731571" path="/var/lib/kubelet/pods/fe13f626-50c7-4ec3-b967-20f038731571/volumes" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.015772 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data\") pod \"ba78b94a-32d0-4377-ac41-ffd036b241bf\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.015808 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-confd\") pod \"7f05291f-1331-411b-9971-c71218d11a35\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.015841 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-erlang-cookie\") pod \"ba78b94a-32d0-4377-ac41-ffd036b241bf\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.015870 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-server-conf\") pod \"7f05291f-1331-411b-9971-c71218d11a35\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.015890 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-plugins-conf\") pod \"ba78b94a-32d0-4377-ac41-ffd036b241bf\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.015924 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"7f05291f-1331-411b-9971-c71218d11a35\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.015949 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kksth\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-kube-api-access-kksth\") pod \"7f05291f-1331-411b-9971-c71218d11a35\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.015976 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-tls\") pod \"ba78b94a-32d0-4377-ac41-ffd036b241bf\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.016025 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ba78b94a-32d0-4377-ac41-ffd036b241bf-pod-info\") pod \"ba78b94a-32d0-4377-ac41-ffd036b241bf\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.016054 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-confd\") pod \"ba78b94a-32d0-4377-ac41-ffd036b241bf\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.016081 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ba78b94a-32d0-4377-ac41-ffd036b241bf\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.016101 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-server-conf\") pod \"ba78b94a-32d0-4377-ac41-ffd036b241bf\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.016125 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-plugins\") pod \"7f05291f-1331-411b-9971-c71218d11a35\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.016156 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-plugins\") pod \"ba78b94a-32d0-4377-ac41-ffd036b241bf\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.016176 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-tls\") pod \"7f05291f-1331-411b-9971-c71218d11a35\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.016206 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data\") pod \"7f05291f-1331-411b-9971-c71218d11a35\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.016247 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7f05291f-1331-411b-9971-c71218d11a35-pod-info\") pod \"7f05291f-1331-411b-9971-c71218d11a35\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.016297 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ba78b94a-32d0-4377-ac41-ffd036b241bf-erlang-cookie-secret\") pod \"ba78b94a-32d0-4377-ac41-ffd036b241bf\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.016347 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-erlang-cookie\") pod \"7f05291f-1331-411b-9971-c71218d11a35\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.016376 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7f05291f-1331-411b-9971-c71218d11a35-erlang-cookie-secret\") pod \"7f05291f-1331-411b-9971-c71218d11a35\" (UID: \"7f05291f-1331-411b-9971-c71218d11a35\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.017165 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n275f\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-kube-api-access-n275f\") pod \"ba78b94a-32d0-4377-ac41-ffd036b241bf\" (UID: \"ba78b94a-32d0-4377-ac41-ffd036b241bf\") "
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.018720 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "7f05291f-1331-411b-9971-c71218d11a35" (UID: "7f05291f-1331-411b-9971-c71218d11a35"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.018716 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "7f05291f-1331-411b-9971-c71218d11a35" (UID: "7f05291f-1331-411b-9971-c71218d11a35"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.018956 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "ba78b94a-32d0-4377-ac41-ffd036b241bf" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.018970 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "7f05291f-1331-411b-9971-c71218d11a35" (UID: "7f05291f-1331-411b-9971-c71218d11a35"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.020955 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/ba78b94a-32d0-4377-ac41-ffd036b241bf-pod-info" (OuterVolumeSpecName: "pod-info") pod "ba78b94a-32d0-4377-ac41-ffd036b241bf" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.022197 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "7f05291f-1331-411b-9971-c71218d11a35" (UID: "7f05291f-1331-411b-9971-c71218d11a35"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.022734 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "ba78b94a-32d0-4377-ac41-ffd036b241bf" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.022745 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "ba78b94a-32d0-4377-ac41-ffd036b241bf" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.023165 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba78b94a-32d0-4377-ac41-ffd036b241bf-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "ba78b94a-32d0-4377-ac41-ffd036b241bf" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.023223 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f05291f-1331-411b-9971-c71218d11a35-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "7f05291f-1331-411b-9971-c71218d11a35" (UID: "7f05291f-1331-411b-9971-c71218d11a35"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.023529 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "persistence") pod "ba78b94a-32d0-4377-ac41-ffd036b241bf" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.023974 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/7f05291f-1331-411b-9971-c71218d11a35-pod-info" (OuterVolumeSpecName: "pod-info") pod "7f05291f-1331-411b-9971-c71218d11a35" (UID: "7f05291f-1331-411b-9971-c71218d11a35"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.026258 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-kube-api-access-kksth" (OuterVolumeSpecName: "kube-api-access-kksth") pod "7f05291f-1331-411b-9971-c71218d11a35" (UID: "7f05291f-1331-411b-9971-c71218d11a35"). InnerVolumeSpecName "kube-api-access-kksth". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.026908 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "ba78b94a-32d0-4377-ac41-ffd036b241bf" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf").
InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.027082 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-kube-api-access-n275f" (OuterVolumeSpecName: "kube-api-access-n275f") pod "ba78b94a-32d0-4377-ac41-ffd036b241bf" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf"). InnerVolumeSpecName "kube-api-access-n275f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.029406 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "7f05291f-1331-411b-9971-c71218d11a35" (UID: "7f05291f-1331-411b-9971-c71218d11a35"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.063423 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-server-conf" (OuterVolumeSpecName: "server-conf") pod "7f05291f-1331-411b-9971-c71218d11a35" (UID: "7f05291f-1331-411b-9971-c71218d11a35"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.066732 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-server-conf" (OuterVolumeSpecName: "server-conf") pod "ba78b94a-32d0-4377-ac41-ffd036b241bf" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.077963 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data" (OuterVolumeSpecName: "config-data") pod "7f05291f-1331-411b-9971-c71218d11a35" (UID: "7f05291f-1331-411b-9971-c71218d11a35"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.081979 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data" (OuterVolumeSpecName: "config-data") pod "ba78b94a-32d0-4377-ac41-ffd036b241bf" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.120697 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.121124 5037 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.122178 5037 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.122348 5037 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.122508 5037 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.122647 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kksth\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-kube-api-access-kksth\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.123203 5037 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.123339 5037 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ba78b94a-32d0-4377-ac41-ffd036b241bf-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.123734 5037 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.124259 5037 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ba78b94a-32d0-4377-ac41-ffd036b241bf-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.124396 5037 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.124488 5037 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.126748 5037 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: 
I1126 14:44:12.126832 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.126898 5037 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7f05291f-1331-411b-9971-c71218d11a35-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.126976 5037 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ba78b94a-32d0-4377-ac41-ffd036b241bf-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.127039 5037 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.127102 5037 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7f05291f-1331-411b-9971-c71218d11a35-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.127195 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n275f\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-kube-api-access-n275f\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.127314 5037 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7f05291f-1331-411b-9971-c71218d11a35-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.128327 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "7f05291f-1331-411b-9971-c71218d11a35" (UID: "7f05291f-1331-411b-9971-c71218d11a35"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.140280 5037 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.145589 5037 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.147425 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "ba78b94a-32d0-4377-ac41-ffd036b241bf" (UID: "ba78b94a-32d0-4377-ac41-ffd036b241bf"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.228844 5037 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7f05291f-1331-411b-9971-c71218d11a35-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.229139 5037 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.229149 5037 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ba78b94a-32d0-4377-ac41-ffd036b241bf-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.229157 5037 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.421917 5037 generic.go:334] "Generic (PLEG): container finished" podID="ba78b94a-32d0-4377-ac41-ffd036b241bf" containerID="74f68fdb96d374b8d9906137608e1412d3d306ce1e1daedf2234bd65a15de9cc" exitCode=0 Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.421981 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.421985 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ba78b94a-32d0-4377-ac41-ffd036b241bf","Type":"ContainerDied","Data":"74f68fdb96d374b8d9906137608e1412d3d306ce1e1daedf2234bd65a15de9cc"} Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.422111 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ba78b94a-32d0-4377-ac41-ffd036b241bf","Type":"ContainerDied","Data":"ea19be20ec1a9986458ca26a10483190304f96a7d56d35f1e5efc302d4be13ee"} Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.422133 5037 scope.go:117] "RemoveContainer" containerID="74f68fdb96d374b8d9906137608e1412d3d306ce1e1daedf2234bd65a15de9cc" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.436265 5037 generic.go:334] "Generic (PLEG): container finished" podID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerID="71d63cfc921e9e46b869f583ba1be1fd0b73e384b0a9b8c0e83735a75f13ecda" exitCode=0 Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.436343 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b90229c-2a39-4627-896f-9c1b27e4f1d5","Type":"ContainerDied","Data":"71d63cfc921e9e46b869f583ba1be1fd0b73e384b0a9b8c0e83735a75f13ecda"} Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.439636 5037 generic.go:334] "Generic (PLEG): container finished" podID="19ae84d4-26f8-4e11-bd01-da880def5547" containerID="73ec95358a687154b2f7af7ab67ff687aabcbeb867fdaa97bcb29864cc40d8c1" exitCode=0 Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.439844 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" event={"ID":"19ae84d4-26f8-4e11-bd01-da880def5547","Type":"ContainerDied","Data":"73ec95358a687154b2f7af7ab67ff687aabcbeb867fdaa97bcb29864cc40d8c1"} Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.442971 5037 generic.go:334] "Generic (PLEG): container finished" 
podID="a97b4f35-04a7-47c3-a658-170645023de6" containerID="25004b7d7570b0227e943b0f10767fefe0da178777c48537fb23de224173d062" exitCode=0 Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.443135 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ccc6df59c-m5tjx" event={"ID":"a97b4f35-04a7-47c3-a658-170645023de6","Type":"ContainerDied","Data":"25004b7d7570b0227e943b0f10767fefe0da178777c48537fb23de224173d062"} Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.456937 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"7f05291f-1331-411b-9971-c71218d11a35","Type":"ContainerDied","Data":"ab253e4e7b760be53f981740689112a54ee580da41e5e706ef1538a995af2a47"} Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.456982 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.481278 5037 scope.go:117] "RemoveContainer" containerID="773034796f31390fe28fdc58e1e871d5f541491426df6c51095f768444fbd35d" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.518787 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.524637 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.533179 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.538388 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.541717 5037 scope.go:117] "RemoveContainer" containerID="74f68fdb96d374b8d9906137608e1412d3d306ce1e1daedf2234bd65a15de9cc" Nov 26 14:44:12 crc kubenswrapper[5037]: E1126 14:44:12.542147 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74f68fdb96d374b8d9906137608e1412d3d306ce1e1daedf2234bd65a15de9cc\": container with ID starting with 74f68fdb96d374b8d9906137608e1412d3d306ce1e1daedf2234bd65a15de9cc not found: ID does not exist" containerID="74f68fdb96d374b8d9906137608e1412d3d306ce1e1daedf2234bd65a15de9cc" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.542181 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74f68fdb96d374b8d9906137608e1412d3d306ce1e1daedf2234bd65a15de9cc"} err="failed to get container status \"74f68fdb96d374b8d9906137608e1412d3d306ce1e1daedf2234bd65a15de9cc\": rpc error: code = NotFound desc = could not find container \"74f68fdb96d374b8d9906137608e1412d3d306ce1e1daedf2234bd65a15de9cc\": container with ID starting with 74f68fdb96d374b8d9906137608e1412d3d306ce1e1daedf2234bd65a15de9cc not found: ID does not exist" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.542201 5037 scope.go:117] "RemoveContainer" containerID="773034796f31390fe28fdc58e1e871d5f541491426df6c51095f768444fbd35d" Nov 26 14:44:12 crc kubenswrapper[5037]: E1126 14:44:12.542630 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"773034796f31390fe28fdc58e1e871d5f541491426df6c51095f768444fbd35d\": container with ID starting with 773034796f31390fe28fdc58e1e871d5f541491426df6c51095f768444fbd35d not found: ID does not exist" 
containerID="773034796f31390fe28fdc58e1e871d5f541491426df6c51095f768444fbd35d" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.542707 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"773034796f31390fe28fdc58e1e871d5f541491426df6c51095f768444fbd35d"} err="failed to get container status \"773034796f31390fe28fdc58e1e871d5f541491426df6c51095f768444fbd35d\": rpc error: code = NotFound desc = could not find container \"773034796f31390fe28fdc58e1e871d5f541491426df6c51095f768444fbd35d\": container with ID starting with 773034796f31390fe28fdc58e1e871d5f541491426df6c51095f768444fbd35d not found: ID does not exist" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.542741 5037 scope.go:117] "RemoveContainer" containerID="4483535c43e875eaf8b876f0ce67748ccef6e9a8c9dba169d9e8c3b8043014ae" Nov 26 14:44:12 crc kubenswrapper[5037]: E1126 14:44:12.556279 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 26 14:44:12 crc kubenswrapper[5037]: E1126 14:44:12.557536 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 26 14:44:12 crc kubenswrapper[5037]: E1126 14:44:12.558528 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 26 14:44:12 crc kubenswrapper[5037]: E1126 14:44:12.558623 5037 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="bf45bdb2-c880-43f7-b30a-4d1b36363f7d" containerName="galera" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.563076 5037 scope.go:117] "RemoveContainer" containerID="1349d55b286187786ca0c0752c21570fa4516f326b8465f8b5bb44574d1252f9" Nov 26 14:44:12 crc kubenswrapper[5037]: I1126 14:44:12.947847 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.040622 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kb9w4\" (UniqueName: \"kubernetes.io/projected/19ae84d4-26f8-4e11-bd01-da880def5547-kube-api-access-kb9w4\") pod \"19ae84d4-26f8-4e11-bd01-da880def5547\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.041176 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-combined-ca-bundle\") pod \"19ae84d4-26f8-4e11-bd01-da880def5547\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.041309 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19ae84d4-26f8-4e11-bd01-da880def5547-logs\") pod \"19ae84d4-26f8-4e11-bd01-da880def5547\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.041347 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-config-data\") pod \"19ae84d4-26f8-4e11-bd01-da880def5547\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.041384 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-config-data-custom\") pod \"19ae84d4-26f8-4e11-bd01-da880def5547\" (UID: \"19ae84d4-26f8-4e11-bd01-da880def5547\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.041795 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19ae84d4-26f8-4e11-bd01-da880def5547-logs" (OuterVolumeSpecName: "logs") pod "19ae84d4-26f8-4e11-bd01-da880def5547" (UID: "19ae84d4-26f8-4e11-bd01-da880def5547"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.041943 5037 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19ae84d4-26f8-4e11-bd01-da880def5547-logs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.044251 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19ae84d4-26f8-4e11-bd01-da880def5547-kube-api-access-kb9w4" (OuterVolumeSpecName: "kube-api-access-kb9w4") pod "19ae84d4-26f8-4e11-bd01-da880def5547" (UID: "19ae84d4-26f8-4e11-bd01-da880def5547"). InnerVolumeSpecName "kube-api-access-kb9w4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.049285 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "19ae84d4-26f8-4e11-bd01-da880def5547" (UID: "19ae84d4-26f8-4e11-bd01-da880def5547"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.078386 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19ae84d4-26f8-4e11-bd01-da880def5547" (UID: "19ae84d4-26f8-4e11-bd01-da880def5547"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.092985 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-config-data" (OuterVolumeSpecName: "config-data") pod "19ae84d4-26f8-4e11-bd01-da880def5547" (UID: "19ae84d4-26f8-4e11-bd01-da880def5547"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.143213 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kb9w4\" (UniqueName: \"kubernetes.io/projected/19ae84d4-26f8-4e11-bd01-da880def5547-kube-api-access-kb9w4\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.143250 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.143262 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.143272 5037 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19ae84d4-26f8-4e11-bd01-da880def5547-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.200661 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.207954 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.310701 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346266 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-combined-ca-bundle\") pod \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346325 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-internal-tls-certs\") pod \"a97b4f35-04a7-47c3-a658-170645023de6\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346416 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-public-tls-certs\") pod \"a97b4f35-04a7-47c3-a658-170645023de6\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346474 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbbxj\" (UniqueName: \"kubernetes.io/projected/7b90229c-2a39-4627-896f-9c1b27e4f1d5-kube-api-access-xbbxj\") pod \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346550 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-ceilometer-tls-certs\") pod \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346591 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-httpd-config\") pod \"a97b4f35-04a7-47c3-a658-170645023de6\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346639 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b90229c-2a39-4627-896f-9c1b27e4f1d5-run-httpd\") pod \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346659 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzbxh\" (UniqueName: \"kubernetes.io/projected/a97b4f35-04a7-47c3-a658-170645023de6-kube-api-access-rzbxh\") pod \"a97b4f35-04a7-47c3-a658-170645023de6\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346673 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-config-data\") pod \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346727 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b90229c-2a39-4627-896f-9c1b27e4f1d5-log-httpd\") pod 
\"7b90229c-2a39-4627-896f-9c1b27e4f1d5\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346757 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-ovndb-tls-certs\") pod \"a97b4f35-04a7-47c3-a658-170645023de6\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346793 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-sg-core-conf-yaml\") pod \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346844 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-config\") pod \"a97b4f35-04a7-47c3-a658-170645023de6\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346905 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-combined-ca-bundle\") pod \"a97b4f35-04a7-47c3-a658-170645023de6\" (UID: \"a97b4f35-04a7-47c3-a658-170645023de6\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346925 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-scripts\") pod \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\" (UID: \"7b90229c-2a39-4627-896f-9c1b27e4f1d5\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.346998 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b90229c-2a39-4627-896f-9c1b27e4f1d5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7b90229c-2a39-4627-896f-9c1b27e4f1d5" (UID: "7b90229c-2a39-4627-896f-9c1b27e4f1d5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.347445 5037 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b90229c-2a39-4627-896f-9c1b27e4f1d5-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.347637 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b90229c-2a39-4627-896f-9c1b27e4f1d5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7b90229c-2a39-4627-896f-9c1b27e4f1d5" (UID: "7b90229c-2a39-4627-896f-9c1b27e4f1d5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.352831 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a97b4f35-04a7-47c3-a658-170645023de6-kube-api-access-rzbxh" (OuterVolumeSpecName: "kube-api-access-rzbxh") pod "a97b4f35-04a7-47c3-a658-170645023de6" (UID: "a97b4f35-04a7-47c3-a658-170645023de6"). InnerVolumeSpecName "kube-api-access-rzbxh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.352886 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b90229c-2a39-4627-896f-9c1b27e4f1d5-kube-api-access-xbbxj" (OuterVolumeSpecName: "kube-api-access-xbbxj") pod "7b90229c-2a39-4627-896f-9c1b27e4f1d5" (UID: "7b90229c-2a39-4627-896f-9c1b27e4f1d5"). InnerVolumeSpecName "kube-api-access-xbbxj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.353486 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "a97b4f35-04a7-47c3-a658-170645023de6" (UID: "a97b4f35-04a7-47c3-a658-170645023de6"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.354102 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-scripts" (OuterVolumeSpecName: "scripts") pod "7b90229c-2a39-4627-896f-9c1b27e4f1d5" (UID: "7b90229c-2a39-4627-896f-9c1b27e4f1d5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.386961 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7b90229c-2a39-4627-896f-9c1b27e4f1d5" (UID: "7b90229c-2a39-4627-896f-9c1b27e4f1d5"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.396974 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a97b4f35-04a7-47c3-a658-170645023de6" (UID: "a97b4f35-04a7-47c3-a658-170645023de6"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.397551 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a97b4f35-04a7-47c3-a658-170645023de6" (UID: "a97b4f35-04a7-47c3-a658-170645023de6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.398089 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "7b90229c-2a39-4627-896f-9c1b27e4f1d5" (UID: "7b90229c-2a39-4627-896f-9c1b27e4f1d5"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.403913 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-config" (OuterVolumeSpecName: "config") pod "a97b4f35-04a7-47c3-a658-170645023de6" (UID: "a97b4f35-04a7-47c3-a658-170645023de6"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.412848 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "a97b4f35-04a7-47c3-a658-170645023de6" (UID: "a97b4f35-04a7-47c3-a658-170645023de6"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.425771 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "a97b4f35-04a7-47c3-a658-170645023de6" (UID: "a97b4f35-04a7-47c3-a658-170645023de6"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.426646 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b90229c-2a39-4627-896f-9c1b27e4f1d5" (UID: "7b90229c-2a39-4627-896f-9c1b27e4f1d5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.434490 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-config-data" (OuterVolumeSpecName: "config-data") pod "7b90229c-2a39-4627-896f-9c1b27e4f1d5" (UID: "7b90229c-2a39-4627-896f-9c1b27e4f1d5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.448562 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8707a232-f648-4795-b250-d29069f26514-combined-ca-bundle\") pod \"8707a232-f648-4795-b250-d29069f26514\" (UID: \"8707a232-f648-4795-b250-d29069f26514\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.448647 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8707a232-f648-4795-b250-d29069f26514-config-data\") pod \"8707a232-f648-4795-b250-d29069f26514\" (UID: \"8707a232-f648-4795-b250-d29069f26514\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.448755 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9f72n\" (UniqueName: \"kubernetes.io/projected/8707a232-f648-4795-b250-d29069f26514-kube-api-access-9f72n\") pod \"8707a232-f648-4795-b250-d29069f26514\" (UID: \"8707a232-f648-4795-b250-d29069f26514\") " Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449143 5037 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449164 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449176 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449187 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449199 5037 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449297 5037 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449313 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbbxj\" (UniqueName: \"kubernetes.io/projected/7b90229c-2a39-4627-896f-9c1b27e4f1d5-kube-api-access-xbbxj\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449323 5037 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449331 5037 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449338 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449346 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzbxh\" (UniqueName: \"kubernetes.io/projected/a97b4f35-04a7-47c3-a658-170645023de6-kube-api-access-rzbxh\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449378 5037 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7b90229c-2a39-4627-896f-9c1b27e4f1d5-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449386 5037 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a97b4f35-04a7-47c3-a658-170645023de6-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.449395 5037 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7b90229c-2a39-4627-896f-9c1b27e4f1d5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.453592 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8707a232-f648-4795-b250-d29069f26514-kube-api-access-9f72n" (OuterVolumeSpecName: "kube-api-access-9f72n") pod "8707a232-f648-4795-b250-d29069f26514" (UID: "8707a232-f648-4795-b250-d29069f26514"). InnerVolumeSpecName "kube-api-access-9f72n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.471552 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8707a232-f648-4795-b250-d29069f26514-config-data" (OuterVolumeSpecName: "config-data") pod "8707a232-f648-4795-b250-d29069f26514" (UID: "8707a232-f648-4795-b250-d29069f26514"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.471591 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" event={"ID":"19ae84d4-26f8-4e11-bd01-da880def5547","Type":"ContainerDied","Data":"66b2d7343a60d8adf5a02b27199fe1a2a6aa37340e7e0a224cd54e178699dcc7"} Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.471710 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7ddc4956b6-dfqsv" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.471855 5037 scope.go:117] "RemoveContainer" containerID="73ec95358a687154b2f7af7ab67ff687aabcbeb867fdaa97bcb29864cc40d8c1" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.474478 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ccc6df59c-m5tjx" event={"ID":"a97b4f35-04a7-47c3-a658-170645023de6","Type":"ContainerDied","Data":"40d1a9355971e6bd47db0f88636ceaae55624b324fb80c87c755bb998fdbd44e"} Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.475006 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7ccc6df59c-m5tjx" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.477806 5037 generic.go:334] "Generic (PLEG): container finished" podID="8707a232-f648-4795-b250-d29069f26514" containerID="b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2" exitCode=0 Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.477841 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8707a232-f648-4795-b250-d29069f26514","Type":"ContainerDied","Data":"b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2"} Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.477880 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8707a232-f648-4795-b250-d29069f26514","Type":"ContainerDied","Data":"f52dfbc55babebb980f0fe8cc248430dce3d7cd8c75104d1368d16533b7fd51f"} Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.477968 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.489968 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8707a232-f648-4795-b250-d29069f26514-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8707a232-f648-4795-b250-d29069f26514" (UID: "8707a232-f648-4795-b250-d29069f26514"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.494263 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7b90229c-2a39-4627-896f-9c1b27e4f1d5","Type":"ContainerDied","Data":"b48ea63d29bf52c4bb1b9d55f2d538dc9d9558b373eafb10bbb2f277e711a5e2"} Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.494386 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.513698 5037 scope.go:117] "RemoveContainer" containerID="355cc9901e399458175cd4640ef40324803629a86ea9a4d2abc2824da07c4f8d" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.514924 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-7ddc4956b6-dfqsv"] Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.530154 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-7ddc4956b6-dfqsv"] Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.542145 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7ccc6df59c-m5tjx"] Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.548229 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7ccc6df59c-m5tjx"] Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.550395 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8707a232-f648-4795-b250-d29069f26514-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.550434 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9f72n\" (UniqueName: \"kubernetes.io/projected/8707a232-f648-4795-b250-d29069f26514-kube-api-access-9f72n\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.550446 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8707a232-f648-4795-b250-d29069f26514-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.553520 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.558872 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.559948 5037 scope.go:117] "RemoveContainer" containerID="d787d7c57b49308ce496dd3022253165f26b5f2096403db68cdd6ea85914b8a9" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.581825 5037 scope.go:117] "RemoveContainer" containerID="25004b7d7570b0227e943b0f10767fefe0da178777c48537fb23de224173d062" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.600709 5037 scope.go:117] "RemoveContainer" containerID="b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.621934 5037 scope.go:117] "RemoveContainer" containerID="b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2" Nov 26 14:44:13 crc kubenswrapper[5037]: E1126 14:44:13.622608 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2\": container with ID starting with 
b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2 not found: ID does not exist" containerID="b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.622661 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2"} err="failed to get container status \"b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2\": rpc error: code = NotFound desc = could not find container \"b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2\": container with ID starting with b658272682462e675abb3613cfedbdb070fda6a2ec653d911c6d60b0faa08bd2 not found: ID does not exist" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.622693 5037 scope.go:117] "RemoveContainer" containerID="4b66f0aa9cd359a08c2fc701fbd668ab4e74119711476d65846830c7024d146e" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.644463 5037 scope.go:117] "RemoveContainer" containerID="e755b8c60d9bc3fba924bee940809c862f89fc6885ca06dec0c4232e6e6116ba" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.664570 5037 scope.go:117] "RemoveContainer" containerID="71d63cfc921e9e46b869f583ba1be1fd0b73e384b0a9b8c0e83735a75f13ecda" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.690369 5037 scope.go:117] "RemoveContainer" containerID="d9e7e3cabf68f8c77fe540ee66229c4f639270ea37015bbb512cd6402e09b909" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.831244 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.837755 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.921028 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19ae84d4-26f8-4e11-bd01-da880def5547" path="/var/lib/kubelet/pods/19ae84d4-26f8-4e11-bd01-da880def5547/volumes" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.921723 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" path="/var/lib/kubelet/pods/7b90229c-2a39-4627-896f-9c1b27e4f1d5/volumes" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.922988 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f05291f-1331-411b-9971-c71218d11a35" path="/var/lib/kubelet/pods/7f05291f-1331-411b-9971-c71218d11a35/volumes" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.923521 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8707a232-f648-4795-b250-d29069f26514" path="/var/lib/kubelet/pods/8707a232-f648-4795-b250-d29069f26514/volumes" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.924042 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a97b4f35-04a7-47c3-a658-170645023de6" path="/var/lib/kubelet/pods/a97b4f35-04a7-47c3-a658-170645023de6/volumes" Nov 26 14:44:13 crc kubenswrapper[5037]: I1126 14:44:13.925272 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba78b94a-32d0-4377-ac41-ffd036b241bf" path="/var/lib/kubelet/pods/ba78b94a-32d0-4377-ac41-ffd036b241bf/volumes" Nov 26 14:44:15 crc kubenswrapper[5037]: E1126 14:44:15.796724 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:15 crc kubenswrapper[5037]: E1126 14:44:15.797452 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:15 crc kubenswrapper[5037]: E1126 14:44:15.797807 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:15 crc kubenswrapper[5037]: E1126 14:44:15.797865 5037 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovsdb-server" Nov 26 14:44:15 crc kubenswrapper[5037]: E1126 14:44:15.798701 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:15 crc kubenswrapper[5037]: E1126 14:44:15.800119 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:15 crc kubenswrapper[5037]: E1126 14:44:15.801921 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:15 crc kubenswrapper[5037]: E1126 14:44:15.801986 5037 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovs-vswitchd" Nov 26 14:44:18 crc kubenswrapper[5037]: E1126 14:44:18.936842 5037 secret.go:188] Couldn't get secret openstack/cinder-scheduler-config-data: secret "cinder-scheduler-config-data" not found Nov 26 14:44:18 crc kubenswrapper[5037]: E1126 14:44:18.937353 5037 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:34.937331702 +0000 UTC m=+1741.734101896 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config-data-custom" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-scheduler-config-data" not found Nov 26 14:44:18 crc kubenswrapper[5037]: E1126 14:44:18.937073 5037 secret.go:188] Couldn't get secret openstack/cinder-config-data: secret "cinder-config-data" not found Nov 26 14:44:18 crc kubenswrapper[5037]: E1126 14:44:18.937131 5037 secret.go:188] Couldn't get secret openstack/cinder-scripts: secret "cinder-scripts" not found Nov 26 14:44:18 crc kubenswrapper[5037]: E1126 14:44:18.938451 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:34.938393458 +0000 UTC m=+1741.735163812 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-config-data" not found Nov 26 14:44:18 crc kubenswrapper[5037]: E1126 14:44:18.938508 5037 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts podName:fe17b260-d105-4274-88d1-d85fd9948f9f nodeName:}" failed. No retries permitted until 2025-11-26 14:44:34.9384812 +0000 UTC m=+1741.735251394 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts") pod "cinder-scheduler-0" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f") : secret "cinder-scripts" not found Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.199117 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.242108 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-config-data-generated\") pod \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.242152 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-config-data-default\") pod \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.242188 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-combined-ca-bundle\") pod \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.242219 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-kolla-config\") pod \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.242251 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-operator-scripts\") pod \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.242342 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-galera-tls-certs\") pod \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.242371 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.242400 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbvsm\" (UniqueName: \"kubernetes.io/projected/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-kube-api-access-bbvsm\") pod \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\" (UID: \"bf45bdb2-c880-43f7-b30a-4d1b36363f7d\") " Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.243960 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "bf45bdb2-c880-43f7-b30a-4d1b36363f7d" (UID: "bf45bdb2-c880-43f7-b30a-4d1b36363f7d"). InnerVolumeSpecName "config-data-generated". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.244031 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "bf45bdb2-c880-43f7-b30a-4d1b36363f7d" (UID: "bf45bdb2-c880-43f7-b30a-4d1b36363f7d"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.244079 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "bf45bdb2-c880-43f7-b30a-4d1b36363f7d" (UID: "bf45bdb2-c880-43f7-b30a-4d1b36363f7d"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.244221 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bf45bdb2-c880-43f7-b30a-4d1b36363f7d" (UID: "bf45bdb2-c880-43f7-b30a-4d1b36363f7d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.256573 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-kube-api-access-bbvsm" (OuterVolumeSpecName: "kube-api-access-bbvsm") pod "bf45bdb2-c880-43f7-b30a-4d1b36363f7d" (UID: "bf45bdb2-c880-43f7-b30a-4d1b36363f7d"). InnerVolumeSpecName "kube-api-access-bbvsm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.266424 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "mysql-db") pod "bf45bdb2-c880-43f7-b30a-4d1b36363f7d" (UID: "bf45bdb2-c880-43f7-b30a-4d1b36363f7d"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.268395 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bf45bdb2-c880-43f7-b30a-4d1b36363f7d" (UID: "bf45bdb2-c880-43f7-b30a-4d1b36363f7d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.287036 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "bf45bdb2-c880-43f7-b30a-4d1b36363f7d" (UID: "bf45bdb2-c880-43f7-b30a-4d1b36363f7d"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.344261 5037 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.344763 5037 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.344871 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.344988 5037 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.345124 5037 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.345225 5037 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.345359 5037 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.345468 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbvsm\" (UniqueName: \"kubernetes.io/projected/bf45bdb2-c880-43f7-b30a-4d1b36363f7d-kube-api-access-bbvsm\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.360250 5037 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.447540 5037 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.571915 5037 generic.go:334] "Generic (PLEG): container finished" podID="bf45bdb2-c880-43f7-b30a-4d1b36363f7d" containerID="df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de" exitCode=0 Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.571965 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.571983 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"bf45bdb2-c880-43f7-b30a-4d1b36363f7d","Type":"ContainerDied","Data":"df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de"} Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.572348 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"bf45bdb2-c880-43f7-b30a-4d1b36363f7d","Type":"ContainerDied","Data":"0e0c43c14d7c40f72d888f92a2463eb3fd5e8a79deb60cac803d58125d95319a"} Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.572371 5037 scope.go:117] "RemoveContainer" containerID="df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.600858 5037 scope.go:117] "RemoveContainer" containerID="50c1e61dc6403c1bf62fd3bbd57c7e084ff6e25c816cb5cc6049442c76ec2eba" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.627793 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.634528 5037 scope.go:117] "RemoveContainer" containerID="df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de" Nov 26 14:44:19 crc kubenswrapper[5037]: E1126 14:44:19.635634 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de\": container with ID starting with df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de not found: ID does not exist" containerID="df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.635687 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de"} err="failed to get container status \"df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de\": rpc error: code = NotFound desc = could not find container \"df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de\": container with ID starting with df7832f00f3c308d592d1eaea2015808ef735d6b8ec275b8ae637538886591de not found: ID does not exist" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.636034 5037 scope.go:117] "RemoveContainer" containerID="50c1e61dc6403c1bf62fd3bbd57c7e084ff6e25c816cb5cc6049442c76ec2eba" Nov 26 14:44:19 crc kubenswrapper[5037]: E1126 14:44:19.636576 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50c1e61dc6403c1bf62fd3bbd57c7e084ff6e25c816cb5cc6049442c76ec2eba\": container with ID starting with 50c1e61dc6403c1bf62fd3bbd57c7e084ff6e25c816cb5cc6049442c76ec2eba not found: ID does not exist" containerID="50c1e61dc6403c1bf62fd3bbd57c7e084ff6e25c816cb5cc6049442c76ec2eba" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.636635 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50c1e61dc6403c1bf62fd3bbd57c7e084ff6e25c816cb5cc6049442c76ec2eba"} err="failed to get container status \"50c1e61dc6403c1bf62fd3bbd57c7e084ff6e25c816cb5cc6049442c76ec2eba\": rpc error: code = NotFound desc = could not find container \"50c1e61dc6403c1bf62fd3bbd57c7e084ff6e25c816cb5cc6049442c76ec2eba\": container with ID starting with 
50c1e61dc6403c1bf62fd3bbd57c7e084ff6e25c816cb5cc6049442c76ec2eba not found: ID does not exist" Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.636742 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 14:44:19 crc kubenswrapper[5037]: I1126 14:44:19.919498 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf45bdb2-c880-43f7-b30a-4d1b36363f7d" path="/var/lib/kubelet/pods/bf45bdb2-c880-43f7-b30a-4d1b36363f7d/volumes" Nov 26 14:44:20 crc kubenswrapper[5037]: E1126 14:44:20.796459 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:20 crc kubenswrapper[5037]: E1126 14:44:20.796985 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:20 crc kubenswrapper[5037]: E1126 14:44:20.797390 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:20 crc kubenswrapper[5037]: E1126 14:44:20.797445 5037 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovsdb-server" Nov 26 14:44:20 crc kubenswrapper[5037]: E1126 14:44:20.798387 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:20 crc kubenswrapper[5037]: E1126 14:44:20.800342 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:20 crc kubenswrapper[5037]: E1126 14:44:20.802756 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" 
cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:20 crc kubenswrapper[5037]: E1126 14:44:20.802860 5037 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovs-vswitchd" Nov 26 14:44:20 crc kubenswrapper[5037]: I1126 14:44:20.908016 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:44:20 crc kubenswrapper[5037]: E1126 14:44:20.908479 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:44:25 crc kubenswrapper[5037]: E1126 14:44:25.796479 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:25 crc kubenswrapper[5037]: E1126 14:44:25.797793 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:25 crc kubenswrapper[5037]: E1126 14:44:25.797940 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:25 crc kubenswrapper[5037]: E1126 14:44:25.798326 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:25 crc kubenswrapper[5037]: E1126 14:44:25.798406 5037 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovsdb-server" Nov 26 14:44:25 crc kubenswrapper[5037]: E1126 14:44:25.801359 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command 
error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:25 crc kubenswrapper[5037]: E1126 14:44:25.803938 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:25 crc kubenswrapper[5037]: E1126 14:44:25.804107 5037 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovs-vswitchd" Nov 26 14:44:30 crc kubenswrapper[5037]: E1126 14:44:30.796366 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:30 crc kubenswrapper[5037]: E1126 14:44:30.797704 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:30 crc kubenswrapper[5037]: E1126 14:44:30.797818 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:30 crc kubenswrapper[5037]: E1126 14:44:30.798226 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 14:44:30 crc kubenswrapper[5037]: E1126 14:44:30.798353 5037 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovsdb-server" Nov 26 14:44:30 crc kubenswrapper[5037]: E1126 14:44:30.799515 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:30 crc kubenswrapper[5037]: E1126 14:44:30.802934 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 14:44:30 crc kubenswrapper[5037]: E1126 14:44:30.803009 5037 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-264cs" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovs-vswitchd" Nov 26 14:44:32 crc kubenswrapper[5037]: I1126 14:44:32.908862 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:44:32 crc kubenswrapper[5037]: E1126 14:44:32.909453 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.731193 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-264cs_80ce8a9a-aa28-40e4-ac35-c7d379224208/ovs-vswitchd/0.log" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.732692 5037 generic.go:334] "Generic (PLEG): container finished" podID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" exitCode=137 Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.732772 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-264cs" event={"ID":"80ce8a9a-aa28-40e4-ac35-c7d379224208","Type":"ContainerDied","Data":"dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486"} Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.883063 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-264cs_80ce8a9a-aa28-40e4-ac35-c7d379224208/ovs-vswitchd/0.log" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.884002 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-264cs" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.890243 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-etc-ovs\") pod \"80ce8a9a-aa28-40e4-ac35-c7d379224208\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.890347 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-lib\") pod \"80ce8a9a-aa28-40e4-ac35-c7d379224208\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.890460 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/80ce8a9a-aa28-40e4-ac35-c7d379224208-scripts\") pod \"80ce8a9a-aa28-40e4-ac35-c7d379224208\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.890496 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "80ce8a9a-aa28-40e4-ac35-c7d379224208" (UID: "80ce8a9a-aa28-40e4-ac35-c7d379224208"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.890518 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-run\") pod \"80ce8a9a-aa28-40e4-ac35-c7d379224208\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.890536 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-lib" (OuterVolumeSpecName: "var-lib") pod "80ce8a9a-aa28-40e4-ac35-c7d379224208" (UID: "80ce8a9a-aa28-40e4-ac35-c7d379224208"). InnerVolumeSpecName "var-lib". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.890549 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-log\") pod \"80ce8a9a-aa28-40e4-ac35-c7d379224208\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.890585 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72s6r\" (UniqueName: \"kubernetes.io/projected/80ce8a9a-aa28-40e4-ac35-c7d379224208-kube-api-access-72s6r\") pod \"80ce8a9a-aa28-40e4-ac35-c7d379224208\" (UID: \"80ce8a9a-aa28-40e4-ac35-c7d379224208\") " Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.890989 5037 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-etc-ovs\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.891021 5037 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-lib\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.891527 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-run" (OuterVolumeSpecName: "var-run") pod "80ce8a9a-aa28-40e4-ac35-c7d379224208" (UID: "80ce8a9a-aa28-40e4-ac35-c7d379224208"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.891565 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-log" (OuterVolumeSpecName: "var-log") pod "80ce8a9a-aa28-40e4-ac35-c7d379224208" (UID: "80ce8a9a-aa28-40e4-ac35-c7d379224208"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.891758 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80ce8a9a-aa28-40e4-ac35-c7d379224208-scripts" (OuterVolumeSpecName: "scripts") pod "80ce8a9a-aa28-40e4-ac35-c7d379224208" (UID: "80ce8a9a-aa28-40e4-ac35-c7d379224208"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.899274 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80ce8a9a-aa28-40e4-ac35-c7d379224208-kube-api-access-72s6r" (OuterVolumeSpecName: "kube-api-access-72s6r") pod "80ce8a9a-aa28-40e4-ac35-c7d379224208" (UID: "80ce8a9a-aa28-40e4-ac35-c7d379224208"). InnerVolumeSpecName "kube-api-access-72s6r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.991977 5037 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.992009 5037 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/80ce8a9a-aa28-40e4-ac35-c7d379224208-var-log\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.992022 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72s6r\" (UniqueName: \"kubernetes.io/projected/80ce8a9a-aa28-40e4-ac35-c7d379224208-kube-api-access-72s6r\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:33 crc kubenswrapper[5037]: I1126 14:44:33.992034 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/80ce8a9a-aa28-40e4-ac35-c7d379224208-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.317965 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.397494 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift\") pod \"10886f85-c800-4999-8c79-c490c60696cc\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.397555 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/10886f85-c800-4999-8c79-c490c60696cc-cache\") pod \"10886f85-c800-4999-8c79-c490c60696cc\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.397593 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/10886f85-c800-4999-8c79-c490c60696cc-lock\") pod \"10886f85-c800-4999-8c79-c490c60696cc\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.397635 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"10886f85-c800-4999-8c79-c490c60696cc\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.397681 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ltfz\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-kube-api-access-7ltfz\") pod \"10886f85-c800-4999-8c79-c490c60696cc\" (UID: \"10886f85-c800-4999-8c79-c490c60696cc\") " Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.398475 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10886f85-c800-4999-8c79-c490c60696cc-cache" (OuterVolumeSpecName: "cache") pod "10886f85-c800-4999-8c79-c490c60696cc" (UID: "10886f85-c800-4999-8c79-c490c60696cc"). InnerVolumeSpecName "cache". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.398740 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10886f85-c800-4999-8c79-c490c60696cc-lock" (OuterVolumeSpecName: "lock") pod "10886f85-c800-4999-8c79-c490c60696cc" (UID: "10886f85-c800-4999-8c79-c490c60696cc"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.401608 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "swift") pod "10886f85-c800-4999-8c79-c490c60696cc" (UID: "10886f85-c800-4999-8c79-c490c60696cc"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.401641 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-kube-api-access-7ltfz" (OuterVolumeSpecName: "kube-api-access-7ltfz") pod "10886f85-c800-4999-8c79-c490c60696cc" (UID: "10886f85-c800-4999-8c79-c490c60696cc"). InnerVolumeSpecName "kube-api-access-7ltfz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.401696 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "10886f85-c800-4999-8c79-c490c60696cc" (UID: "10886f85-c800-4999-8c79-c490c60696cc"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.499098 5037 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/10886f85-c800-4999-8c79-c490c60696cc-lock\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.499160 5037 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.499181 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ltfz\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-kube-api-access-7ltfz\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.499194 5037 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/10886f85-c800-4999-8c79-c490c60696cc-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.499206 5037 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/10886f85-c800-4999-8c79-c490c60696cc-cache\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.513925 5037 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.599941 5037 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:34 crc 
kubenswrapper[5037]: I1126 14:44:34.749698 5037 generic.go:334] "Generic (PLEG): container finished" podID="10886f85-c800-4999-8c79-c490c60696cc" containerID="c8f7e68bd6dcee155bb73bde0f7e251636a9f691fa99efe37da6d71e22470060" exitCode=137 Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.749778 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"c8f7e68bd6dcee155bb73bde0f7e251636a9f691fa99efe37da6d71e22470060"} Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.749832 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"10886f85-c800-4999-8c79-c490c60696cc","Type":"ContainerDied","Data":"b80c78e74048f915f9c31fe4cb9091ed1e1e9a14388858dd2ca0a92d07227a6b"} Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.749849 5037 scope.go:117] "RemoveContainer" containerID="c8f7e68bd6dcee155bb73bde0f7e251636a9f691fa99efe37da6d71e22470060" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.750043 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.751964 5037 generic.go:334] "Generic (PLEG): container finished" podID="fe17b260-d105-4274-88d1-d85fd9948f9f" containerID="a141207a0fff58064f3407d6c288ff7903f292bd3e192081eb2a010bd7fcf95d" exitCode=137 Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.752044 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fe17b260-d105-4274-88d1-d85fd9948f9f","Type":"ContainerDied","Data":"a141207a0fff58064f3407d6c288ff7903f292bd3e192081eb2a010bd7fcf95d"} Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.753948 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-264cs_80ce8a9a-aa28-40e4-ac35-c7d379224208/ovs-vswitchd/0.log" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.757250 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-264cs" event={"ID":"80ce8a9a-aa28-40e4-ac35-c7d379224208","Type":"ContainerDied","Data":"0bb14f6e64679cbe8f0af44beef4596b9d9476ba7856ce4fd8bdabd30a1a1179"} Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.757357 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-264cs" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.777507 5037 scope.go:117] "RemoveContainer" containerID="73c060abeb7573649685e311227f2a579fdf95557d8415f02f112eb7df9fe387" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.782252 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-264cs"] Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.789067 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-264cs"] Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.798737 5037 scope.go:117] "RemoveContainer" containerID="ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.802648 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.807272 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.818130 5037 scope.go:117] "RemoveContainer" containerID="38af5291214696fc2ab5068031bf723126b6ea1a4502cbaf41fec1945bdddb71" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.871017 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.879390 5037 scope.go:117] "RemoveContainer" containerID="3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.913325 5037 scope.go:117] "RemoveContainer" containerID="b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.934504 5037 scope.go:117] "RemoveContainer" containerID="1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.954279 5037 scope.go:117] "RemoveContainer" containerID="dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.980314 5037 scope.go:117] "RemoveContainer" containerID="3269f937868b4639b15beb2313a77a6d697a8359d42a1eac21aab99aba4a3441" Nov 26 14:44:34 crc kubenswrapper[5037]: I1126 14:44:34.997524 5037 scope.go:117] "RemoveContainer" containerID="a67a55597dfa0413c7fcfba871c60e4ca78dcc1337e642fa8a730a82b2946f38" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.003655 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fskp\" (UniqueName: \"kubernetes.io/projected/fe17b260-d105-4274-88d1-d85fd9948f9f-kube-api-access-8fskp\") pod \"fe17b260-d105-4274-88d1-d85fd9948f9f\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.003708 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts\") pod \"fe17b260-d105-4274-88d1-d85fd9948f9f\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.003792 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fe17b260-d105-4274-88d1-d85fd9948f9f-etc-machine-id\") pod \"fe17b260-d105-4274-88d1-d85fd9948f9f\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " Nov 26 14:44:35 
crc kubenswrapper[5037]: I1126 14:44:35.003833 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom\") pod \"fe17b260-d105-4274-88d1-d85fd9948f9f\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.003860 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data\") pod \"fe17b260-d105-4274-88d1-d85fd9948f9f\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.003887 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-combined-ca-bundle\") pod \"fe17b260-d105-4274-88d1-d85fd9948f9f\" (UID: \"fe17b260-d105-4274-88d1-d85fd9948f9f\") " Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.003921 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fe17b260-d105-4274-88d1-d85fd9948f9f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "fe17b260-d105-4274-88d1-d85fd9948f9f" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.004137 5037 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fe17b260-d105-4274-88d1-d85fd9948f9f-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.007553 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe17b260-d105-4274-88d1-d85fd9948f9f-kube-api-access-8fskp" (OuterVolumeSpecName: "kube-api-access-8fskp") pod "fe17b260-d105-4274-88d1-d85fd9948f9f" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f"). InnerVolumeSpecName "kube-api-access-8fskp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.007975 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts" (OuterVolumeSpecName: "scripts") pod "fe17b260-d105-4274-88d1-d85fd9948f9f" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.008955 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "fe17b260-d105-4274-88d1-d85fd9948f9f" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.028469 5037 scope.go:117] "RemoveContainer" containerID="64e910dd424738dcc2a6a10dbfc0d43ed55b865d44976cf3ce77949fd94d142f" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.057387 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fe17b260-d105-4274-88d1-d85fd9948f9f" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.059794 5037 scope.go:117] "RemoveContainer" containerID="1830d485d70f2c4c16c972d8eb54d3d68060d42e9eb67b0f0be4a183511992c6" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.077573 5037 scope.go:117] "RemoveContainer" containerID="9ad134020857e5330738626c90a057bc32ca98d01d16f8a94a600086e2df114c" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.109043 5037 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.109069 5037 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.109078 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8fskp\" (UniqueName: \"kubernetes.io/projected/fe17b260-d105-4274-88d1-d85fd9948f9f-kube-api-access-8fskp\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.109091 5037 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.112990 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data" (OuterVolumeSpecName: "config-data") pod "fe17b260-d105-4274-88d1-d85fd9948f9f" (UID: "fe17b260-d105-4274-88d1-d85fd9948f9f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.131027 5037 scope.go:117] "RemoveContainer" containerID="b54bd1523c4248a6b946bc2484b15b9a925819b903de19e564491a32a104536e" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.153106 5037 scope.go:117] "RemoveContainer" containerID="6af1db545967ed1a4d63df5e069cefc5f2002414e3177a1c53b51f7542200023" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.170245 5037 scope.go:117] "RemoveContainer" containerID="c8f7e68bd6dcee155bb73bde0f7e251636a9f691fa99efe37da6d71e22470060" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.170641 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8f7e68bd6dcee155bb73bde0f7e251636a9f691fa99efe37da6d71e22470060\": container with ID starting with c8f7e68bd6dcee155bb73bde0f7e251636a9f691fa99efe37da6d71e22470060 not found: ID does not exist" containerID="c8f7e68bd6dcee155bb73bde0f7e251636a9f691fa99efe37da6d71e22470060" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.170686 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8f7e68bd6dcee155bb73bde0f7e251636a9f691fa99efe37da6d71e22470060"} err="failed to get container status \"c8f7e68bd6dcee155bb73bde0f7e251636a9f691fa99efe37da6d71e22470060\": rpc error: code = NotFound desc = could not find container \"c8f7e68bd6dcee155bb73bde0f7e251636a9f691fa99efe37da6d71e22470060\": container with ID starting with c8f7e68bd6dcee155bb73bde0f7e251636a9f691fa99efe37da6d71e22470060 not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.170717 5037 scope.go:117] "RemoveContainer" containerID="73c060abeb7573649685e311227f2a579fdf95557d8415f02f112eb7df9fe387" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.170956 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73c060abeb7573649685e311227f2a579fdf95557d8415f02f112eb7df9fe387\": container with ID starting with 73c060abeb7573649685e311227f2a579fdf95557d8415f02f112eb7df9fe387 not found: ID does not exist" containerID="73c060abeb7573649685e311227f2a579fdf95557d8415f02f112eb7df9fe387" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.171000 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73c060abeb7573649685e311227f2a579fdf95557d8415f02f112eb7df9fe387"} err="failed to get container status \"73c060abeb7573649685e311227f2a579fdf95557d8415f02f112eb7df9fe387\": rpc error: code = NotFound desc = could not find container \"73c060abeb7573649685e311227f2a579fdf95557d8415f02f112eb7df9fe387\": container with ID starting with 73c060abeb7573649685e311227f2a579fdf95557d8415f02f112eb7df9fe387 not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.171028 5037 scope.go:117] "RemoveContainer" containerID="ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.171253 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349\": container with ID starting with ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349 not found: ID does not exist" containerID="ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349" Nov 26 14:44:35 crc 
kubenswrapper[5037]: I1126 14:44:35.171349 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349"} err="failed to get container status \"ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349\": rpc error: code = NotFound desc = could not find container \"ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349\": container with ID starting with ad88500c12de1786aaa4f3cd261187b528c098a8b57abe7c0b3889beed1fd349 not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.171374 5037 scope.go:117] "RemoveContainer" containerID="38af5291214696fc2ab5068031bf723126b6ea1a4502cbaf41fec1945bdddb71" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.171709 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38af5291214696fc2ab5068031bf723126b6ea1a4502cbaf41fec1945bdddb71\": container with ID starting with 38af5291214696fc2ab5068031bf723126b6ea1a4502cbaf41fec1945bdddb71 not found: ID does not exist" containerID="38af5291214696fc2ab5068031bf723126b6ea1a4502cbaf41fec1945bdddb71" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.171749 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38af5291214696fc2ab5068031bf723126b6ea1a4502cbaf41fec1945bdddb71"} err="failed to get container status \"38af5291214696fc2ab5068031bf723126b6ea1a4502cbaf41fec1945bdddb71\": rpc error: code = NotFound desc = could not find container \"38af5291214696fc2ab5068031bf723126b6ea1a4502cbaf41fec1945bdddb71\": container with ID starting with 38af5291214696fc2ab5068031bf723126b6ea1a4502cbaf41fec1945bdddb71 not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.171767 5037 scope.go:117] "RemoveContainer" containerID="3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.172243 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5\": container with ID starting with 3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5 not found: ID does not exist" containerID="3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.172270 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5"} err="failed to get container status \"3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5\": rpc error: code = NotFound desc = could not find container \"3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5\": container with ID starting with 3af054768a48001311b15006237ee32a28fa31bc5a3ba26f86659bf895c4f0b5 not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.172321 5037 scope.go:117] "RemoveContainer" containerID="b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.172658 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948\": container with ID starting with 
b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948 not found: ID does not exist" containerID="b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.172696 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948"} err="failed to get container status \"b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948\": rpc error: code = NotFound desc = could not find container \"b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948\": container with ID starting with b162b6c5fe59ecea9688eeaa61133779a54660d54311beec4f4febf2fc191948 not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.172714 5037 scope.go:117] "RemoveContainer" containerID="1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.172999 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140\": container with ID starting with 1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140 not found: ID does not exist" containerID="1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.173032 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140"} err="failed to get container status \"1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140\": rpc error: code = NotFound desc = could not find container \"1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140\": container with ID starting with 1a454db31dd2243c0baed5f659db3f03cf1284eb320367be8d2eeaee2d9e7140 not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.173052 5037 scope.go:117] "RemoveContainer" containerID="dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.174235 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7\": container with ID starting with dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7 not found: ID does not exist" containerID="dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.174266 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7"} err="failed to get container status \"dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7\": rpc error: code = NotFound desc = could not find container \"dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7\": container with ID starting with dfe76a7230b634adf6aebdf67b296fb27df1714714ebd62003a566a079db4ad7 not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.174298 5037 scope.go:117] "RemoveContainer" containerID="3269f937868b4639b15beb2313a77a6d697a8359d42a1eac21aab99aba4a3441" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.174524 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc 
error: code = NotFound desc = could not find container \"3269f937868b4639b15beb2313a77a6d697a8359d42a1eac21aab99aba4a3441\": container with ID starting with 3269f937868b4639b15beb2313a77a6d697a8359d42a1eac21aab99aba4a3441 not found: ID does not exist" containerID="3269f937868b4639b15beb2313a77a6d697a8359d42a1eac21aab99aba4a3441" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.174557 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3269f937868b4639b15beb2313a77a6d697a8359d42a1eac21aab99aba4a3441"} err="failed to get container status \"3269f937868b4639b15beb2313a77a6d697a8359d42a1eac21aab99aba4a3441\": rpc error: code = NotFound desc = could not find container \"3269f937868b4639b15beb2313a77a6d697a8359d42a1eac21aab99aba4a3441\": container with ID starting with 3269f937868b4639b15beb2313a77a6d697a8359d42a1eac21aab99aba4a3441 not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.174578 5037 scope.go:117] "RemoveContainer" containerID="a67a55597dfa0413c7fcfba871c60e4ca78dcc1337e642fa8a730a82b2946f38" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.174871 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a67a55597dfa0413c7fcfba871c60e4ca78dcc1337e642fa8a730a82b2946f38\": container with ID starting with a67a55597dfa0413c7fcfba871c60e4ca78dcc1337e642fa8a730a82b2946f38 not found: ID does not exist" containerID="a67a55597dfa0413c7fcfba871c60e4ca78dcc1337e642fa8a730a82b2946f38" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.174901 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a67a55597dfa0413c7fcfba871c60e4ca78dcc1337e642fa8a730a82b2946f38"} err="failed to get container status \"a67a55597dfa0413c7fcfba871c60e4ca78dcc1337e642fa8a730a82b2946f38\": rpc error: code = NotFound desc = could not find container \"a67a55597dfa0413c7fcfba871c60e4ca78dcc1337e642fa8a730a82b2946f38\": container with ID starting with a67a55597dfa0413c7fcfba871c60e4ca78dcc1337e642fa8a730a82b2946f38 not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.174921 5037 scope.go:117] "RemoveContainer" containerID="64e910dd424738dcc2a6a10dbfc0d43ed55b865d44976cf3ce77949fd94d142f" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.175189 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64e910dd424738dcc2a6a10dbfc0d43ed55b865d44976cf3ce77949fd94d142f\": container with ID starting with 64e910dd424738dcc2a6a10dbfc0d43ed55b865d44976cf3ce77949fd94d142f not found: ID does not exist" containerID="64e910dd424738dcc2a6a10dbfc0d43ed55b865d44976cf3ce77949fd94d142f" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.175227 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64e910dd424738dcc2a6a10dbfc0d43ed55b865d44976cf3ce77949fd94d142f"} err="failed to get container status \"64e910dd424738dcc2a6a10dbfc0d43ed55b865d44976cf3ce77949fd94d142f\": rpc error: code = NotFound desc = could not find container \"64e910dd424738dcc2a6a10dbfc0d43ed55b865d44976cf3ce77949fd94d142f\": container with ID starting with 64e910dd424738dcc2a6a10dbfc0d43ed55b865d44976cf3ce77949fd94d142f not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.175254 5037 scope.go:117] "RemoveContainer" 
containerID="1830d485d70f2c4c16c972d8eb54d3d68060d42e9eb67b0f0be4a183511992c6" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.175556 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1830d485d70f2c4c16c972d8eb54d3d68060d42e9eb67b0f0be4a183511992c6\": container with ID starting with 1830d485d70f2c4c16c972d8eb54d3d68060d42e9eb67b0f0be4a183511992c6 not found: ID does not exist" containerID="1830d485d70f2c4c16c972d8eb54d3d68060d42e9eb67b0f0be4a183511992c6" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.175596 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1830d485d70f2c4c16c972d8eb54d3d68060d42e9eb67b0f0be4a183511992c6"} err="failed to get container status \"1830d485d70f2c4c16c972d8eb54d3d68060d42e9eb67b0f0be4a183511992c6\": rpc error: code = NotFound desc = could not find container \"1830d485d70f2c4c16c972d8eb54d3d68060d42e9eb67b0f0be4a183511992c6\": container with ID starting with 1830d485d70f2c4c16c972d8eb54d3d68060d42e9eb67b0f0be4a183511992c6 not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.175622 5037 scope.go:117] "RemoveContainer" containerID="9ad134020857e5330738626c90a057bc32ca98d01d16f8a94a600086e2df114c" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.175795 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ad134020857e5330738626c90a057bc32ca98d01d16f8a94a600086e2df114c\": container with ID starting with 9ad134020857e5330738626c90a057bc32ca98d01d16f8a94a600086e2df114c not found: ID does not exist" containerID="9ad134020857e5330738626c90a057bc32ca98d01d16f8a94a600086e2df114c" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.175827 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ad134020857e5330738626c90a057bc32ca98d01d16f8a94a600086e2df114c"} err="failed to get container status \"9ad134020857e5330738626c90a057bc32ca98d01d16f8a94a600086e2df114c\": rpc error: code = NotFound desc = could not find container \"9ad134020857e5330738626c90a057bc32ca98d01d16f8a94a600086e2df114c\": container with ID starting with 9ad134020857e5330738626c90a057bc32ca98d01d16f8a94a600086e2df114c not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.175845 5037 scope.go:117] "RemoveContainer" containerID="b54bd1523c4248a6b946bc2484b15b9a925819b903de19e564491a32a104536e" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.176221 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b54bd1523c4248a6b946bc2484b15b9a925819b903de19e564491a32a104536e\": container with ID starting with b54bd1523c4248a6b946bc2484b15b9a925819b903de19e564491a32a104536e not found: ID does not exist" containerID="b54bd1523c4248a6b946bc2484b15b9a925819b903de19e564491a32a104536e" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.176251 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b54bd1523c4248a6b946bc2484b15b9a925819b903de19e564491a32a104536e"} err="failed to get container status \"b54bd1523c4248a6b946bc2484b15b9a925819b903de19e564491a32a104536e\": rpc error: code = NotFound desc = could not find container \"b54bd1523c4248a6b946bc2484b15b9a925819b903de19e564491a32a104536e\": container with ID starting with 
b54bd1523c4248a6b946bc2484b15b9a925819b903de19e564491a32a104536e not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.176271 5037 scope.go:117] "RemoveContainer" containerID="6af1db545967ed1a4d63df5e069cefc5f2002414e3177a1c53b51f7542200023" Nov 26 14:44:35 crc kubenswrapper[5037]: E1126 14:44:35.176622 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6af1db545967ed1a4d63df5e069cefc5f2002414e3177a1c53b51f7542200023\": container with ID starting with 6af1db545967ed1a4d63df5e069cefc5f2002414e3177a1c53b51f7542200023 not found: ID does not exist" containerID="6af1db545967ed1a4d63df5e069cefc5f2002414e3177a1c53b51f7542200023" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.176653 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6af1db545967ed1a4d63df5e069cefc5f2002414e3177a1c53b51f7542200023"} err="failed to get container status \"6af1db545967ed1a4d63df5e069cefc5f2002414e3177a1c53b51f7542200023\": rpc error: code = NotFound desc = could not find container \"6af1db545967ed1a4d63df5e069cefc5f2002414e3177a1c53b51f7542200023\": container with ID starting with 6af1db545967ed1a4d63df5e069cefc5f2002414e3177a1c53b51f7542200023 not found: ID does not exist" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.176673 5037 scope.go:117] "RemoveContainer" containerID="dc60cd871e55f538b7db49d446338e526894553aaa076fdbe1e2f04a853fb486" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.196725 5037 scope.go:117] "RemoveContainer" containerID="1001fa505fa021ba7eb16889d961ca146c092a2f162261939f9bd4fb303c0bff" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.210184 5037 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe17b260-d105-4274-88d1-d85fd9948f9f-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.215663 5037 scope.go:117] "RemoveContainer" containerID="a1372d4864850cb820cd97d1937d4be30fd34be7f7c558211291bd85334e4082" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.771177 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fe17b260-d105-4274-88d1-d85fd9948f9f","Type":"ContainerDied","Data":"cefb06f33be235b2147738c7a9f8b79ef5ea8381b1098388471d2071462e2d8a"} Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.771516 5037 scope.go:117] "RemoveContainer" containerID="140e7be2182c285f86914d1d0349ab0f880704f06b09bd28f8522e6957b1e06c" Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.771216 5037 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.791831 5037 scope.go:117] "RemoveContainer" containerID="a141207a0fff58064f3407d6c288ff7903f292bd3e192081eb2a010bd7fcf95d"
Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.808514 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.813494 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.920872 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10886f85-c800-4999-8c79-c490c60696cc" path="/var/lib/kubelet/pods/10886f85-c800-4999-8c79-c490c60696cc/volumes"
Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.922736 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" path="/var/lib/kubelet/pods/80ce8a9a-aa28-40e4-ac35-c7d379224208/volumes"
Nov 26 14:44:35 crc kubenswrapper[5037]: I1126 14:44:35.923380 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe17b260-d105-4274-88d1-d85fd9948f9f" path="/var/lib/kubelet/pods/fe17b260-d105-4274-88d1-d85fd9948f9f/volumes"
Nov 26 14:44:40 crc kubenswrapper[5037]: I1126 14:44:40.416845 5037 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/keystone-fb548d49-hf8zh" podUID="fe13f626-50c7-4ec3-b967-20f038731571" containerName="keystone-api" probeResult="failure" output="Get \"https://10.217.0.149:5000/v3\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 26 14:44:46 crc kubenswrapper[5037]: I1126 14:44:46.908151 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181"
Nov 26 14:44:46 crc kubenswrapper[5037]: E1126 14:44:46.908932 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.159186 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs"]
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160625 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="300dce8f-4337-4707-8075-f32b93f03e4f" containerName="mysql-bootstrap"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160646 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="300dce8f-4337-4707-8075-f32b93f03e4f" containerName="mysql-bootstrap"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160668 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec26620a-6ad8-4792-bb25-543dc31d3be5" containerName="openstack-network-exporter"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160676 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec26620a-6ad8-4792-bb25-543dc31d3be5" containerName="openstack-network-exporter"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160686 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovsdb-server-init"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160696 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovsdb-server-init"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160709 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovs-vswitchd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160719 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovs-vswitchd"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160731 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8df4197d-046b-4b35-a14a-b382bda46242" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160740 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8df4197d-046b-4b35-a14a-b382bda46242" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160760 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d75a18-6446-4558-af57-c6e0c957fc3b" containerName="placement-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160770 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d75a18-6446-4558-af57-c6e0c957fc3b" containerName="placement-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160785 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="693d1a99-bf33-42ee-adea-2f8ce0f6c002" containerName="glance-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160797 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="693d1a99-bf33-42ee-adea-2f8ce0f6c002" containerName="glance-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160811 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" containerName="openstack-network-exporter"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160820 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" containerName="openstack-network-exporter"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160832 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a97b4f35-04a7-47c3-a658-170645023de6" containerName="neutron-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160842 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="a97b4f35-04a7-47c3-a658-170645023de6" containerName="neutron-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160853 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf45bdb2-c880-43f7-b30a-4d1b36363f7d" containerName="mysql-bootstrap"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160863 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf45bdb2-c880-43f7-b30a-4d1b36363f7d" containerName="mysql-bootstrap"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160873 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-replicator"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160882 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-replicator"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160895 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" containerName="glance-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160903 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" containerName="glance-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160919 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c3c49ff-cf53-4b5b-ba83-10877d499763" containerName="nova-scheduler-scheduler"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160927 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c3c49ff-cf53-4b5b-ba83-10877d499763" containerName="nova-scheduler-scheduler"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160939 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cff988a9-69e2-42cc-a456-426f13be8a58" containerName="nova-api-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160948 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="cff988a9-69e2-42cc-a456-426f13be8a58" containerName="nova-api-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160966 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba78b94a-32d0-4377-ac41-ffd036b241bf" containerName="setup-container"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.160976 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba78b94a-32d0-4377-ac41-ffd036b241bf" containerName="setup-container"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.160993 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d75a18-6446-4558-af57-c6e0c957fc3b" containerName="placement-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161114 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d75a18-6446-4558-af57-c6e0c957fc3b" containerName="placement-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161128 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-replicator"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161139 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-replicator"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161150 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3dc5e2c-0729-4f4d-8481-bd8fb0064a80" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161160 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3dc5e2c-0729-4f4d-8481-bd8fb0064a80" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161173 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-auditor"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161182 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-auditor"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161193 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba78b94a-32d0-4377-ac41-ffd036b241bf" containerName="rabbitmq"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161202 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba78b94a-32d0-4377-ac41-ffd036b241bf" containerName="rabbitmq"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161216 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4408c030-a5ac-49ae-9361-54cbe3c27108" containerName="nova-cell1-novncproxy-novncproxy"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161225 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="4408c030-a5ac-49ae-9361-54cbe3c27108" containerName="nova-cell1-novncproxy-novncproxy"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161239 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec26620a-6ad8-4792-bb25-543dc31d3be5" containerName="ovn-northd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161248 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec26620a-6ad8-4792-bb25-543dc31d3be5" containerName="ovn-northd"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161259 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07720f90-b6f7-4b81-9c32-17f1e72b19fa" containerName="cinder-api-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161267 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="07720f90-b6f7-4b81-9c32-17f1e72b19fa" containerName="cinder-api-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161281 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aed636f4-272c-4379-a6f3-8247ae0e46cc" containerName="proxy-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161318 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="aed636f4-272c-4379-a6f3-8247ae0e46cc" containerName="proxy-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161334 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdd4849b-e92e-473d-88d0-74c060c04eb7" containerName="memcached"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161345 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdd4849b-e92e-473d-88d0-74c060c04eb7" containerName="memcached"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161359 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" containerName="ovn-controller"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161367 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" containerName="ovn-controller"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161385 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d49cc40-ce20-415f-a979-398430c2bd81" containerName="barbican-worker-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161394 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d49cc40-ce20-415f-a979-398430c2bd81" containerName="barbican-worker-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161413 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-expirer"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161422 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-expirer"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161434 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="rsync"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161443 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="rsync"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161454 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4fd340f-f656-4ec3-aba1-a33eaa58aed0" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161463 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4fd340f-f656-4ec3-aba1-a33eaa58aed0" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161474 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="334f3bb7-793e-4cff-b0ef-de24dc8a46b5" containerName="barbican-api-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161484 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="334f3bb7-793e-4cff-b0ef-de24dc8a46b5" containerName="barbican-api-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161497 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd171888-b656-4511-af7d-cdff1058bf5f" containerName="kube-state-metrics"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161507 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd171888-b656-4511-af7d-cdff1058bf5f" containerName="kube-state-metrics"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161523 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10cd5eda-54cc-4c0a-91ca-4f8217e5220e" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161532 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10cd5eda-54cc-4c0a-91ca-4f8217e5220e" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161548 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0797697-2b6d-4684-9fe1-e17a91f80369" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161557 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0797697-2b6d-4684-9fe1-e17a91f80369" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161573 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" containerName="ovsdbserver-nb"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161583 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" containerName="ovsdbserver-nb"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161601 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="300dce8f-4337-4707-8075-f32b93f03e4f" containerName="galera"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161609 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="300dce8f-4337-4707-8075-f32b93f03e4f" containerName="galera"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161627 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-updater"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161636 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-updater"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161653 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8707a232-f648-4795-b250-d29069f26514" containerName="nova-cell0-conductor-conductor"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161662 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8707a232-f648-4795-b250-d29069f26514" containerName="nova-cell0-conductor-conductor"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161674 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19ae84d4-26f8-4e11-bd01-da880def5547" containerName="barbican-keystone-listener"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161684 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="19ae84d4-26f8-4e11-bd01-da880def5547" containerName="barbican-keystone-listener"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161698 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="257e4b94-6b37-4243-8e8a-6bd47f0a5603" containerName="init"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161707 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="257e4b94-6b37-4243-8e8a-6bd47f0a5603" containerName="init"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161728 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20b489db-2066-4222-9131-99da1bd054e3" containerName="openstack-network-exporter"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161739 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="20b489db-2066-4222-9131-99da1bd054e3" containerName="openstack-network-exporter"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161759 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b53df32-369f-4a91-bb97-5da067cc3c6a" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161770 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b53df32-369f-4a91-bb97-5da067cc3c6a" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161787 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerName="nova-metadata-metadata"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161798 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerName="nova-metadata-metadata"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161818 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" containerName="openstack-network-exporter"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161830 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" containerName="openstack-network-exporter"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161845 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerName="nova-metadata-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161857 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerName="nova-metadata-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161867 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="ceilometer-central-agent"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161877 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="ceilometer-central-agent"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161893 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd47ce65-1426-47e2-a5d1-6efd83bac3ab" containerName="nova-cell1-conductor-conductor"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161901 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd47ce65-1426-47e2-a5d1-6efd83bac3ab" containerName="nova-cell1-conductor-conductor"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161915 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a97b4f35-04a7-47c3-a658-170645023de6" containerName="neutron-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161925 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="a97b4f35-04a7-47c3-a658-170645023de6" containerName="neutron-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161940 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe17b260-d105-4274-88d1-d85fd9948f9f" containerName="probe"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161949 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe17b260-d105-4274-88d1-d85fd9948f9f" containerName="probe"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161968 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f05291f-1331-411b-9971-c71218d11a35" containerName="setup-container"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.161978 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f05291f-1331-411b-9971-c71218d11a35" containerName="setup-container"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.161998 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe17b260-d105-4274-88d1-d85fd9948f9f" containerName="cinder-scheduler"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162007 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe17b260-d105-4274-88d1-d85fd9948f9f" containerName="cinder-scheduler"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162020 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" containerName="glance-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162027 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" containerName="glance-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162041 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162049 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162057 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-auditor"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162078 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-auditor"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162089 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="sg-core"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162096 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="sg-core"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162108 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-reaper"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162116 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-reaper"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162129 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aed636f4-272c-4379-a6f3-8247ae0e46cc" containerName="proxy-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162138 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="aed636f4-272c-4379-a6f3-8247ae0e46cc" containerName="proxy-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162149 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7ece585-54a5-40d4-866f-98c968f03910" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162156 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7ece585-54a5-40d4-866f-98c968f03910" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162166 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="swift-recon-cron"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162173 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="swift-recon-cron"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162187 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07720f90-b6f7-4b81-9c32-17f1e72b19fa" containerName="cinder-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162195 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="07720f90-b6f7-4b81-9c32-17f1e72b19fa" containerName="cinder-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162204 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="257e4b94-6b37-4243-8e8a-6bd47f0a5603" containerName="dnsmasq-dns"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162212 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="257e4b94-6b37-4243-8e8a-6bd47f0a5603" containerName="dnsmasq-dns"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162226 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovsdb-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162234 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovsdb-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162245 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf45bdb2-c880-43f7-b30a-4d1b36363f7d" containerName="galera"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162252 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf45bdb2-c880-43f7-b30a-4d1b36363f7d" containerName="galera"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162261 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="334f3bb7-793e-4cff-b0ef-de24dc8a46b5" containerName="barbican-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162269 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="334f3bb7-793e-4cff-b0ef-de24dc8a46b5" containerName="barbican-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162282 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cff988a9-69e2-42cc-a456-426f13be8a58" containerName="nova-api-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162309 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="cff988a9-69e2-42cc-a456-426f13be8a58" containerName="nova-api-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162321 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d49cc40-ce20-415f-a979-398430c2bd81" containerName="barbican-worker"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162329 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d49cc40-ce20-415f-a979-398430c2bd81" containerName="barbican-worker"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162337 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="proxy-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162364 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="proxy-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162377 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe13f626-50c7-4ec3-b967-20f038731571" containerName="keystone-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162384 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe13f626-50c7-4ec3-b967-20f038731571" containerName="keystone-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162402 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f05291f-1331-411b-9971-c71218d11a35" containerName="rabbitmq"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162426 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f05291f-1331-411b-9971-c71218d11a35" containerName="rabbitmq"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162436 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-updater"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162443 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-updater"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162454 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19ae84d4-26f8-4e11-bd01-da880def5547" containerName="barbican-keystone-listener-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162461 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="19ae84d4-26f8-4e11-bd01-da880def5547" containerName="barbican-keystone-listener-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162472 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03ffa609-b428-4a0e-8ec1-5c205391cf7b" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162479 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="03ffa609-b428-4a0e-8ec1-5c205391cf7b" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162490 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" containerName="ovsdbserver-sb"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162497 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" containerName="ovsdbserver-sb"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162506 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162513 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162522 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-auditor"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162529 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-auditor"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162538 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162545 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162557 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="693d1a99-bf33-42ee-adea-2f8ce0f6c002" containerName="glance-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162565 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="693d1a99-bf33-42ee-adea-2f8ce0f6c002" containerName="glance-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162578 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="ceilometer-notification-agent"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162586 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="ceilometer-notification-agent"
Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.162594 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-replicator"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162602 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-replicator"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162770 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="ceilometer-notification-agent"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162782 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="cff988a9-69e2-42cc-a456-426f13be8a58" containerName="nova-api-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162795 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovsdb-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162806 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="03ffa609-b428-4a0e-8ec1-5c205391cf7b" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162820 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd171888-b656-4511-af7d-cdff1058bf5f" containerName="kube-state-metrics"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162835 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerName="nova-metadata-metadata"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162845 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-auditor"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162860 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="a97b4f35-04a7-47c3-a658-170645023de6" containerName="neutron-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162869 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9da4e66-4ad9-41e8-8d95-21b1cefbfb0e" containerName="nova-metadata-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162881 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="rsync"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162893 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" containerName="glance-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162907 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2d75a18-6446-4558-af57-c6e0c957fc3b" containerName="placement-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162919 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="a97b4f35-04a7-47c3-a658-170645023de6" containerName="neutron-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162931 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="cff988a9-69e2-42cc-a456-426f13be8a58" containerName="nova-api-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162944 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" containerName="ovsdbserver-nb"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162954 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf45bdb2-c880-43f7-b30a-4d1b36363f7d" containerName="galera"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162966 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162974 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe17b260-d105-4274-88d1-d85fd9948f9f" containerName="cinder-scheduler"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.162987 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="ceilometer-central-agent"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163001 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" containerName="openstack-network-exporter"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163014 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdd4849b-e92e-473d-88d0-74c060c04eb7" containerName="memcached"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163044 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-auditor"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163052 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="334f3bb7-793e-4cff-b0ef-de24dc8a46b5" containerName="barbican-api-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163060 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fc5b7e5-d3e2-4992-ad53-21a16bb5d62e" containerName="glance-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163071 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="07720f90-b6f7-4b81-9c32-17f1e72b19fa" containerName="cinder-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163080 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="4408c030-a5ac-49ae-9361-54cbe3c27108" containerName="nova-cell1-novncproxy-novncproxy"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163093 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c3c49ff-cf53-4b5b-ba83-10877d499763" containerName="nova-scheduler-scheduler"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163103 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-replicator"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163115 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="07720f90-b6f7-4b81-9c32-17f1e72b19fa" containerName="cinder-api-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163124 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-updater"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163138 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdfb3a48-f040-40b1-a9ca-98d7b7f4fa89" containerName="openstack-network-exporter"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163148 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0797697-2b6d-4684-9fe1-e17a91f80369" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163158 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="8707a232-f648-4795-b250-d29069f26514" containerName="nova-cell0-conductor-conductor"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163169 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="19ae84d4-26f8-4e11-bd01-da880def5547" containerName="barbican-keystone-listener"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163179 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d49cc40-ce20-415f-a979-398430c2bd81" containerName="barbican-worker-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163190 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-auditor"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163198 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-updater"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163206 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7ece585-54a5-40d4-866f-98c968f03910" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163217 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe13f626-50c7-4ec3-b967-20f038731571" containerName="keystone-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163226 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec26620a-6ad8-4792-bb25-543dc31d3be5" containerName="openstack-network-exporter"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163237 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="334f3bb7-793e-4cff-b0ef-de24dc8a46b5" containerName="barbican-api"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163249 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="proxy-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163259 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-expirer"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163272 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe17b260-d105-4274-88d1-d85fd9948f9f" containerName="probe"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163280 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b90229c-2a39-4627-896f-9c1b27e4f1d5" containerName="sg-core"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163311 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163321 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3dc5e2c-0729-4f4d-8481-bd8fb0064a80" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163329 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4fd340f-f656-4ec3-aba1-a33eaa58aed0" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163339 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c8fb953-de6b-48ee-afaf-c0a1a8b3acbf" containerName="ovn-controller"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163350 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="container-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163358 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="19ae84d4-26f8-4e11-bd01-da880def5547" containerName="barbican-keystone-listener-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163366 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae7a4aa7-762d-47f3-8f5c-d2c43a0efd48" containerName="ovsdbserver-sb"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163376 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="300dce8f-4337-4707-8075-f32b93f03e4f" containerName="galera"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163387 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd47ce65-1426-47e2-a5d1-6efd83bac3ab" containerName="nova-cell1-conductor-conductor"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163401 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="20b489db-2066-4222-9131-99da1bd054e3" containerName="openstack-network-exporter"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163413 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="object-replicator"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163427 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-reaper"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163437 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="257e4b94-6b37-4243-8e8a-6bd47f0a5603" containerName="dnsmasq-dns"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163447 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="693d1a99-bf33-42ee-adea-2f8ce0f6c002" containerName="glance-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163461 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d49cc40-ce20-415f-a979-398430c2bd81" containerName="barbican-worker"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163473 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="80ce8a9a-aa28-40e4-ac35-c7d379224208" containerName="ovs-vswitchd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163484 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b53df32-369f-4a91-bb97-5da067cc3c6a" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163496 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec26620a-6ad8-4792-bb25-543dc31d3be5" containerName="ovn-northd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163509 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba78b94a-32d0-4377-ac41-ffd036b241bf" containerName="rabbitmq"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163521 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="account-replicator"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163529 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10886f85-c800-4999-8c79-c490c60696cc" containerName="swift-recon-cron"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163538 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="8df4197d-046b-4b35-a14a-b382bda46242" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163552 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="693d1a99-bf33-42ee-adea-2f8ce0f6c002" containerName="glance-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163568 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="aed636f4-272c-4379-a6f3-8247ae0e46cc" containerName="proxy-httpd"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163582 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="aed636f4-272c-4379-a6f3-8247ae0e46cc" containerName="proxy-server"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163596 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f05291f-1331-411b-9971-c71218d11a35" containerName="rabbitmq"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163606 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2d75a18-6446-4558-af57-c6e0c957fc3b" containerName="placement-log"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.163616 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="10cd5eda-54cc-4c0a-91ca-4f8217e5220e" containerName="mariadb-account-delete"
Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.164239 5037 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.169661 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.169813 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.176531 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs"] Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.313868 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d5f8a89b-07b9-4a28-bca3-8f5564634feb-secret-volume\") pod \"collect-profiles-29402805-5cbhs\" (UID: \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.313946 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdvft\" (UniqueName: \"kubernetes.io/projected/d5f8a89b-07b9-4a28-bca3-8f5564634feb-kube-api-access-hdvft\") pod \"collect-profiles-29402805-5cbhs\" (UID: \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.314026 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d5f8a89b-07b9-4a28-bca3-8f5564634feb-config-volume\") pod \"collect-profiles-29402805-5cbhs\" (UID: \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.415610 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d5f8a89b-07b9-4a28-bca3-8f5564634feb-secret-volume\") pod \"collect-profiles-29402805-5cbhs\" (UID: \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.415690 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdvft\" (UniqueName: \"kubernetes.io/projected/d5f8a89b-07b9-4a28-bca3-8f5564634feb-kube-api-access-hdvft\") pod \"collect-profiles-29402805-5cbhs\" (UID: \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.415779 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d5f8a89b-07b9-4a28-bca3-8f5564634feb-config-volume\") pod \"collect-profiles-29402805-5cbhs\" (UID: \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.416804 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d5f8a89b-07b9-4a28-bca3-8f5564634feb-config-volume\") pod 
\"collect-profiles-29402805-5cbhs\" (UID: \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.425557 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d5f8a89b-07b9-4a28-bca3-8f5564634feb-secret-volume\") pod \"collect-profiles-29402805-5cbhs\" (UID: \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.430881 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdvft\" (UniqueName: \"kubernetes.io/projected/d5f8a89b-07b9-4a28-bca3-8f5564634feb-kube-api-access-hdvft\") pod \"collect-profiles-29402805-5cbhs\" (UID: \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.523583 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.908383 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:45:00 crc kubenswrapper[5037]: E1126 14:45:00.909105 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:45:00 crc kubenswrapper[5037]: I1126 14:45:00.946887 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs"] Nov 26 14:45:01 crc kubenswrapper[5037]: I1126 14:45:01.045210 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" event={"ID":"d5f8a89b-07b9-4a28-bca3-8f5564634feb","Type":"ContainerStarted","Data":"76959fe29681392475b1fa8d38f89ee2ab81940d4c9acab939aaa0208a13b72a"} Nov 26 14:45:02 crc kubenswrapper[5037]: I1126 14:45:02.059480 5037 generic.go:334] "Generic (PLEG): container finished" podID="d5f8a89b-07b9-4a28-bca3-8f5564634feb" containerID="a4dbd91b679db67c424df575ea21fe86ee8a6694de9ee93ff35fcc4822988a82" exitCode=0 Nov 26 14:45:02 crc kubenswrapper[5037]: I1126 14:45:02.059564 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" event={"ID":"d5f8a89b-07b9-4a28-bca3-8f5564634feb","Type":"ContainerDied","Data":"a4dbd91b679db67c424df575ea21fe86ee8a6694de9ee93ff35fcc4822988a82"} Nov 26 14:45:03 crc kubenswrapper[5037]: I1126 14:45:03.355180 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" Nov 26 14:45:03 crc kubenswrapper[5037]: I1126 14:45:03.468466 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d5f8a89b-07b9-4a28-bca3-8f5564634feb-secret-volume\") pod \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\" (UID: \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\") " Nov 26 14:45:03 crc kubenswrapper[5037]: I1126 14:45:03.468588 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdvft\" (UniqueName: \"kubernetes.io/projected/d5f8a89b-07b9-4a28-bca3-8f5564634feb-kube-api-access-hdvft\") pod \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\" (UID: \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\") " Nov 26 14:45:03 crc kubenswrapper[5037]: I1126 14:45:03.468746 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d5f8a89b-07b9-4a28-bca3-8f5564634feb-config-volume\") pod \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\" (UID: \"d5f8a89b-07b9-4a28-bca3-8f5564634feb\") " Nov 26 14:45:03 crc kubenswrapper[5037]: I1126 14:45:03.469781 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5f8a89b-07b9-4a28-bca3-8f5564634feb-config-volume" (OuterVolumeSpecName: "config-volume") pod "d5f8a89b-07b9-4a28-bca3-8f5564634feb" (UID: "d5f8a89b-07b9-4a28-bca3-8f5564634feb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 14:45:03 crc kubenswrapper[5037]: I1126 14:45:03.474834 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5f8a89b-07b9-4a28-bca3-8f5564634feb-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d5f8a89b-07b9-4a28-bca3-8f5564634feb" (UID: "d5f8a89b-07b9-4a28-bca3-8f5564634feb"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 14:45:03 crc kubenswrapper[5037]: I1126 14:45:03.474861 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5f8a89b-07b9-4a28-bca3-8f5564634feb-kube-api-access-hdvft" (OuterVolumeSpecName: "kube-api-access-hdvft") pod "d5f8a89b-07b9-4a28-bca3-8f5564634feb" (UID: "d5f8a89b-07b9-4a28-bca3-8f5564634feb"). InnerVolumeSpecName "kube-api-access-hdvft". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:45:03 crc kubenswrapper[5037]: I1126 14:45:03.570960 5037 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d5f8a89b-07b9-4a28-bca3-8f5564634feb-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 14:45:03 crc kubenswrapper[5037]: I1126 14:45:03.571004 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdvft\" (UniqueName: \"kubernetes.io/projected/d5f8a89b-07b9-4a28-bca3-8f5564634feb-kube-api-access-hdvft\") on node \"crc\" DevicePath \"\"" Nov 26 14:45:03 crc kubenswrapper[5037]: I1126 14:45:03.571015 5037 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d5f8a89b-07b9-4a28-bca3-8f5564634feb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 14:45:04 crc kubenswrapper[5037]: I1126 14:45:04.080545 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" event={"ID":"d5f8a89b-07b9-4a28-bca3-8f5564634feb","Type":"ContainerDied","Data":"76959fe29681392475b1fa8d38f89ee2ab81940d4c9acab939aaa0208a13b72a"} Nov 26 14:45:04 crc kubenswrapper[5037]: I1126 14:45:04.080816 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76959fe29681392475b1fa8d38f89ee2ab81940d4c9acab939aaa0208a13b72a" Nov 26 14:45:04 crc kubenswrapper[5037]: I1126 14:45:04.080635 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs" Nov 26 14:45:11 crc kubenswrapper[5037]: I1126 14:45:11.909534 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:45:11 crc kubenswrapper[5037]: E1126 14:45:11.910825 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:45:24 crc kubenswrapper[5037]: I1126 14:45:24.909613 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:45:24 crc kubenswrapper[5037]: E1126 14:45:24.912480 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:45:25 crc kubenswrapper[5037]: I1126 14:45:25.984531 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bg5n4"] Nov 26 14:45:25 crc kubenswrapper[5037]: E1126 14:45:25.984897 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5f8a89b-07b9-4a28-bca3-8f5564634feb" containerName="collect-profiles" Nov 26 14:45:25 crc kubenswrapper[5037]: I1126 14:45:25.984913 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5f8a89b-07b9-4a28-bca3-8f5564634feb" 
containerName="collect-profiles" Nov 26 14:45:25 crc kubenswrapper[5037]: I1126 14:45:25.985098 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5f8a89b-07b9-4a28-bca3-8f5564634feb" containerName="collect-profiles" Nov 26 14:45:25 crc kubenswrapper[5037]: I1126 14:45:25.987623 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:26 crc kubenswrapper[5037]: I1126 14:45:26.019393 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bg5n4"] Nov 26 14:45:26 crc kubenswrapper[5037]: I1126 14:45:26.151116 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-catalog-content\") pod \"redhat-marketplace-bg5n4\" (UID: \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\") " pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:26 crc kubenswrapper[5037]: I1126 14:45:26.151191 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kc6x\" (UniqueName: \"kubernetes.io/projected/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-kube-api-access-2kc6x\") pod \"redhat-marketplace-bg5n4\" (UID: \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\") " pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:26 crc kubenswrapper[5037]: I1126 14:45:26.151386 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-utilities\") pod \"redhat-marketplace-bg5n4\" (UID: \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\") " pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:26 crc kubenswrapper[5037]: I1126 14:45:26.252627 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-catalog-content\") pod \"redhat-marketplace-bg5n4\" (UID: \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\") " pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:26 crc kubenswrapper[5037]: I1126 14:45:26.252694 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kc6x\" (UniqueName: \"kubernetes.io/projected/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-kube-api-access-2kc6x\") pod \"redhat-marketplace-bg5n4\" (UID: \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\") " pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:26 crc kubenswrapper[5037]: I1126 14:45:26.252719 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-utilities\") pod \"redhat-marketplace-bg5n4\" (UID: \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\") " pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:26 crc kubenswrapper[5037]: I1126 14:45:26.253130 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-catalog-content\") pod \"redhat-marketplace-bg5n4\" (UID: \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\") " pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:26 crc kubenswrapper[5037]: I1126 14:45:26.253255 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-utilities\") pod \"redhat-marketplace-bg5n4\" (UID: \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\") " pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:26 crc kubenswrapper[5037]: I1126 14:45:26.283502 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kc6x\" (UniqueName: \"kubernetes.io/projected/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-kube-api-access-2kc6x\") pod \"redhat-marketplace-bg5n4\" (UID: \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\") " pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:26 crc kubenswrapper[5037]: I1126 14:45:26.312679 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:26 crc kubenswrapper[5037]: I1126 14:45:26.818065 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bg5n4"] Nov 26 14:45:27 crc kubenswrapper[5037]: I1126 14:45:27.401271 5037 generic.go:334] "Generic (PLEG): container finished" podID="ca3683eb-a8e5-4897-a94d-91cb2601fbb5" containerID="a03025a44e737db8fff8e6e483af849478d9cef7c22f57a26a153d36dd5a66df" exitCode=0 Nov 26 14:45:27 crc kubenswrapper[5037]: I1126 14:45:27.401595 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bg5n4" event={"ID":"ca3683eb-a8e5-4897-a94d-91cb2601fbb5","Type":"ContainerDied","Data":"a03025a44e737db8fff8e6e483af849478d9cef7c22f57a26a153d36dd5a66df"} Nov 26 14:45:27 crc kubenswrapper[5037]: I1126 14:45:27.401841 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bg5n4" event={"ID":"ca3683eb-a8e5-4897-a94d-91cb2601fbb5","Type":"ContainerStarted","Data":"a9dd8d21dc12eea79c8be9c44c641e2cd08b8dc8c921d4ee946305268b49e919"} Nov 26 14:45:29 crc kubenswrapper[5037]: I1126 14:45:29.442917 5037 generic.go:334] "Generic (PLEG): container finished" podID="ca3683eb-a8e5-4897-a94d-91cb2601fbb5" containerID="4eedad9e1372de07bb0132ab9c34557333e128e66600b7253bf6215ffdaca70a" exitCode=0 Nov 26 14:45:29 crc kubenswrapper[5037]: I1126 14:45:29.442995 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bg5n4" event={"ID":"ca3683eb-a8e5-4897-a94d-91cb2601fbb5","Type":"ContainerDied","Data":"4eedad9e1372de07bb0132ab9c34557333e128e66600b7253bf6215ffdaca70a"} Nov 26 14:45:30 crc kubenswrapper[5037]: I1126 14:45:30.458764 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bg5n4" event={"ID":"ca3683eb-a8e5-4897-a94d-91cb2601fbb5","Type":"ContainerStarted","Data":"0bfe2d4232729ff468d863d892ac43a4284ae8a7d9a59dbb45a78676f300907e"} Nov 26 14:45:30 crc kubenswrapper[5037]: I1126 14:45:30.492670 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bg5n4" podStartSLOduration=2.93940058 podStartE2EDuration="5.492642553s" podCreationTimestamp="2025-11-26 14:45:25 +0000 UTC" firstStartedPulling="2025-11-26 14:45:27.404016612 +0000 UTC m=+1794.200786806" lastFinishedPulling="2025-11-26 14:45:29.957258555 +0000 UTC m=+1796.754028779" observedRunningTime="2025-11-26 14:45:30.483383586 +0000 UTC m=+1797.280153820" watchObservedRunningTime="2025-11-26 14:45:30.492642553 +0000 UTC m=+1797.289412767" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.238625 5037 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/certified-operators-cslxk"] Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.241588 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.253397 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cslxk"] Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.313122 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.313176 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.348713 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgh9b\" (UniqueName: \"kubernetes.io/projected/670f1e26-c826-4296-b4f9-5b14ce2a3aa4-kube-api-access-vgh9b\") pod \"certified-operators-cslxk\" (UID: \"670f1e26-c826-4296-b4f9-5b14ce2a3aa4\") " pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.348836 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/670f1e26-c826-4296-b4f9-5b14ce2a3aa4-utilities\") pod \"certified-operators-cslxk\" (UID: \"670f1e26-c826-4296-b4f9-5b14ce2a3aa4\") " pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.349462 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/670f1e26-c826-4296-b4f9-5b14ce2a3aa4-catalog-content\") pod \"certified-operators-cslxk\" (UID: \"670f1e26-c826-4296-b4f9-5b14ce2a3aa4\") " pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.367606 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.451150 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/670f1e26-c826-4296-b4f9-5b14ce2a3aa4-utilities\") pod \"certified-operators-cslxk\" (UID: \"670f1e26-c826-4296-b4f9-5b14ce2a3aa4\") " pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.451275 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/670f1e26-c826-4296-b4f9-5b14ce2a3aa4-catalog-content\") pod \"certified-operators-cslxk\" (UID: \"670f1e26-c826-4296-b4f9-5b14ce2a3aa4\") " pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.451475 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgh9b\" (UniqueName: \"kubernetes.io/projected/670f1e26-c826-4296-b4f9-5b14ce2a3aa4-kube-api-access-vgh9b\") pod \"certified-operators-cslxk\" (UID: \"670f1e26-c826-4296-b4f9-5b14ce2a3aa4\") " pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.452240 5037 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/670f1e26-c826-4296-b4f9-5b14ce2a3aa4-utilities\") pod \"certified-operators-cslxk\" (UID: \"670f1e26-c826-4296-b4f9-5b14ce2a3aa4\") " pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.452698 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/670f1e26-c826-4296-b4f9-5b14ce2a3aa4-catalog-content\") pod \"certified-operators-cslxk\" (UID: \"670f1e26-c826-4296-b4f9-5b14ce2a3aa4\") " pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.471366 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgh9b\" (UniqueName: \"kubernetes.io/projected/670f1e26-c826-4296-b4f9-5b14ce2a3aa4-kube-api-access-vgh9b\") pod \"certified-operators-cslxk\" (UID: \"670f1e26-c826-4296-b4f9-5b14ce2a3aa4\") " pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.562718 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:36 crc kubenswrapper[5037]: I1126 14:45:36.570858 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:37 crc kubenswrapper[5037]: I1126 14:45:37.045788 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cslxk"] Nov 26 14:45:37 crc kubenswrapper[5037]: W1126 14:45:37.049248 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod670f1e26_c826_4296_b4f9_5b14ce2a3aa4.slice/crio-d0e13b4837a6be512a3e9ce97076bb9f335424f9edd64d14a99a11e989fb907d WatchSource:0}: Error finding container d0e13b4837a6be512a3e9ce97076bb9f335424f9edd64d14a99a11e989fb907d: Status 404 returned error can't find the container with id d0e13b4837a6be512a3e9ce97076bb9f335424f9edd64d14a99a11e989fb907d Nov 26 14:45:37 crc kubenswrapper[5037]: I1126 14:45:37.519791 5037 generic.go:334] "Generic (PLEG): container finished" podID="670f1e26-c826-4296-b4f9-5b14ce2a3aa4" containerID="3018cdb070e3a13092e450c27cf3a2b78ef8dff9736cbfcee0b8a7e1730bea74" exitCode=0 Nov 26 14:45:37 crc kubenswrapper[5037]: I1126 14:45:37.519853 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cslxk" event={"ID":"670f1e26-c826-4296-b4f9-5b14ce2a3aa4","Type":"ContainerDied","Data":"3018cdb070e3a13092e450c27cf3a2b78ef8dff9736cbfcee0b8a7e1730bea74"} Nov 26 14:45:37 crc kubenswrapper[5037]: I1126 14:45:37.519916 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cslxk" event={"ID":"670f1e26-c826-4296-b4f9-5b14ce2a3aa4","Type":"ContainerStarted","Data":"d0e13b4837a6be512a3e9ce97076bb9f335424f9edd64d14a99a11e989fb907d"} Nov 26 14:45:38 crc kubenswrapper[5037]: I1126 14:45:38.615071 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bg5n4"] Nov 26 14:45:38 crc kubenswrapper[5037]: I1126 14:45:38.615603 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bg5n4" podUID="ca3683eb-a8e5-4897-a94d-91cb2601fbb5" containerName="registry-server" 
containerID="cri-o://0bfe2d4232729ff468d863d892ac43a4284ae8a7d9a59dbb45a78676f300907e" gracePeriod=2 Nov 26 14:45:38 crc kubenswrapper[5037]: I1126 14:45:38.907745 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:45:38 crc kubenswrapper[5037]: E1126 14:45:38.908070 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.032707 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.192987 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-catalog-content\") pod \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\" (UID: \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\") " Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.193325 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-utilities\") pod \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\" (UID: \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\") " Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.193415 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kc6x\" (UniqueName: \"kubernetes.io/projected/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-kube-api-access-2kc6x\") pod \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\" (UID: \"ca3683eb-a8e5-4897-a94d-91cb2601fbb5\") " Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.195690 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-utilities" (OuterVolumeSpecName: "utilities") pod "ca3683eb-a8e5-4897-a94d-91cb2601fbb5" (UID: "ca3683eb-a8e5-4897-a94d-91cb2601fbb5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.200866 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-kube-api-access-2kc6x" (OuterVolumeSpecName: "kube-api-access-2kc6x") pod "ca3683eb-a8e5-4897-a94d-91cb2601fbb5" (UID: "ca3683eb-a8e5-4897-a94d-91cb2601fbb5"). InnerVolumeSpecName "kube-api-access-2kc6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.215032 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ca3683eb-a8e5-4897-a94d-91cb2601fbb5" (UID: "ca3683eb-a8e5-4897-a94d-91cb2601fbb5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.295232 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.295268 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kc6x\" (UniqueName: \"kubernetes.io/projected/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-kube-api-access-2kc6x\") on node \"crc\" DevicePath \"\"" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.295281 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca3683eb-a8e5-4897-a94d-91cb2601fbb5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.540347 5037 generic.go:334] "Generic (PLEG): container finished" podID="ca3683eb-a8e5-4897-a94d-91cb2601fbb5" containerID="0bfe2d4232729ff468d863d892ac43a4284ae8a7d9a59dbb45a78676f300907e" exitCode=0 Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.540391 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bg5n4" event={"ID":"ca3683eb-a8e5-4897-a94d-91cb2601fbb5","Type":"ContainerDied","Data":"0bfe2d4232729ff468d863d892ac43a4284ae8a7d9a59dbb45a78676f300907e"} Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.540404 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bg5n4" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.540422 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bg5n4" event={"ID":"ca3683eb-a8e5-4897-a94d-91cb2601fbb5","Type":"ContainerDied","Data":"a9dd8d21dc12eea79c8be9c44c641e2cd08b8dc8c921d4ee946305268b49e919"} Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.540448 5037 scope.go:117] "RemoveContainer" containerID="0bfe2d4232729ff468d863d892ac43a4284ae8a7d9a59dbb45a78676f300907e" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.571306 5037 scope.go:117] "RemoveContainer" containerID="4eedad9e1372de07bb0132ab9c34557333e128e66600b7253bf6215ffdaca70a" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.572389 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bg5n4"] Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.576914 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bg5n4"] Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.607350 5037 scope.go:117] "RemoveContainer" containerID="a03025a44e737db8fff8e6e483af849478d9cef7c22f57a26a153d36dd5a66df" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.623759 5037 scope.go:117] "RemoveContainer" containerID="0bfe2d4232729ff468d863d892ac43a4284ae8a7d9a59dbb45a78676f300907e" Nov 26 14:45:39 crc kubenswrapper[5037]: E1126 14:45:39.624079 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0bfe2d4232729ff468d863d892ac43a4284ae8a7d9a59dbb45a78676f300907e\": container with ID starting with 0bfe2d4232729ff468d863d892ac43a4284ae8a7d9a59dbb45a78676f300907e not found: ID does not exist" containerID="0bfe2d4232729ff468d863d892ac43a4284ae8a7d9a59dbb45a78676f300907e" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.624115 5037 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bfe2d4232729ff468d863d892ac43a4284ae8a7d9a59dbb45a78676f300907e"} err="failed to get container status \"0bfe2d4232729ff468d863d892ac43a4284ae8a7d9a59dbb45a78676f300907e\": rpc error: code = NotFound desc = could not find container \"0bfe2d4232729ff468d863d892ac43a4284ae8a7d9a59dbb45a78676f300907e\": container with ID starting with 0bfe2d4232729ff468d863d892ac43a4284ae8a7d9a59dbb45a78676f300907e not found: ID does not exist" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.624139 5037 scope.go:117] "RemoveContainer" containerID="4eedad9e1372de07bb0132ab9c34557333e128e66600b7253bf6215ffdaca70a" Nov 26 14:45:39 crc kubenswrapper[5037]: E1126 14:45:39.624451 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4eedad9e1372de07bb0132ab9c34557333e128e66600b7253bf6215ffdaca70a\": container with ID starting with 4eedad9e1372de07bb0132ab9c34557333e128e66600b7253bf6215ffdaca70a not found: ID does not exist" containerID="4eedad9e1372de07bb0132ab9c34557333e128e66600b7253bf6215ffdaca70a" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.624475 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4eedad9e1372de07bb0132ab9c34557333e128e66600b7253bf6215ffdaca70a"} err="failed to get container status \"4eedad9e1372de07bb0132ab9c34557333e128e66600b7253bf6215ffdaca70a\": rpc error: code = NotFound desc = could not find container \"4eedad9e1372de07bb0132ab9c34557333e128e66600b7253bf6215ffdaca70a\": container with ID starting with 4eedad9e1372de07bb0132ab9c34557333e128e66600b7253bf6215ffdaca70a not found: ID does not exist" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.624497 5037 scope.go:117] "RemoveContainer" containerID="a03025a44e737db8fff8e6e483af849478d9cef7c22f57a26a153d36dd5a66df" Nov 26 14:45:39 crc kubenswrapper[5037]: E1126 14:45:39.624785 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a03025a44e737db8fff8e6e483af849478d9cef7c22f57a26a153d36dd5a66df\": container with ID starting with a03025a44e737db8fff8e6e483af849478d9cef7c22f57a26a153d36dd5a66df not found: ID does not exist" containerID="a03025a44e737db8fff8e6e483af849478d9cef7c22f57a26a153d36dd5a66df" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.624814 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a03025a44e737db8fff8e6e483af849478d9cef7c22f57a26a153d36dd5a66df"} err="failed to get container status \"a03025a44e737db8fff8e6e483af849478d9cef7c22f57a26a153d36dd5a66df\": rpc error: code = NotFound desc = could not find container \"a03025a44e737db8fff8e6e483af849478d9cef7c22f57a26a153d36dd5a66df\": container with ID starting with a03025a44e737db8fff8e6e483af849478d9cef7c22f57a26a153d36dd5a66df not found: ID does not exist" Nov 26 14:45:39 crc kubenswrapper[5037]: I1126 14:45:39.917252 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca3683eb-a8e5-4897-a94d-91cb2601fbb5" path="/var/lib/kubelet/pods/ca3683eb-a8e5-4897-a94d-91cb2601fbb5/volumes" Nov 26 14:45:42 crc kubenswrapper[5037]: I1126 14:45:42.572688 5037 generic.go:334] "Generic (PLEG): container finished" podID="670f1e26-c826-4296-b4f9-5b14ce2a3aa4" containerID="560b857238f78f864faadfbc335db06a41a2b6ad6aed4cdc5d4e92011b9be767" exitCode=0 Nov 26 14:45:42 crc kubenswrapper[5037]: I1126 
14:45:42.572773 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cslxk" event={"ID":"670f1e26-c826-4296-b4f9-5b14ce2a3aa4","Type":"ContainerDied","Data":"560b857238f78f864faadfbc335db06a41a2b6ad6aed4cdc5d4e92011b9be767"} Nov 26 14:45:43 crc kubenswrapper[5037]: I1126 14:45:43.584376 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cslxk" event={"ID":"670f1e26-c826-4296-b4f9-5b14ce2a3aa4","Type":"ContainerStarted","Data":"854e1090bb3f46a833d96074ad0dc8aac52814c4ff8c7b2c0e6d717774828680"} Nov 26 14:45:43 crc kubenswrapper[5037]: I1126 14:45:43.605460 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cslxk" podStartSLOduration=2.114443065 podStartE2EDuration="7.605441525s" podCreationTimestamp="2025-11-26 14:45:36 +0000 UTC" firstStartedPulling="2025-11-26 14:45:37.522972999 +0000 UTC m=+1804.319743223" lastFinishedPulling="2025-11-26 14:45:43.013971489 +0000 UTC m=+1809.810741683" observedRunningTime="2025-11-26 14:45:43.604889152 +0000 UTC m=+1810.401659376" watchObservedRunningTime="2025-11-26 14:45:43.605441525 +0000 UTC m=+1810.402211709" Nov 26 14:45:46 crc kubenswrapper[5037]: I1126 14:45:46.572421 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:46 crc kubenswrapper[5037]: I1126 14:45:46.572782 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:46 crc kubenswrapper[5037]: I1126 14:45:46.646993 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:47 crc kubenswrapper[5037]: I1126 14:45:47.022745 5037 scope.go:117] "RemoveContainer" containerID="7dd90ada4d32cc3f0045ed892e7616d50b4fe26936dc57ecbadc1f8f2b7f6564" Nov 26 14:45:47 crc kubenswrapper[5037]: I1126 14:45:47.080176 5037 scope.go:117] "RemoveContainer" containerID="2800f7d3106987fa8cb86a64eef775428f5a97b3fb2e92fc9188245db77ac484" Nov 26 14:45:47 crc kubenswrapper[5037]: I1126 14:45:47.122078 5037 scope.go:117] "RemoveContainer" containerID="880247269d69779bbf9a952eff9c17aaddc108b220a076adf10b12d9e8d111eb" Nov 26 14:45:47 crc kubenswrapper[5037]: I1126 14:45:47.192840 5037 scope.go:117] "RemoveContainer" containerID="77c7c1aa98cd388dde079bf390d4f95410a791cc036f4afe9dfb28d8e40a7d3a" Nov 26 14:45:47 crc kubenswrapper[5037]: I1126 14:45:47.280831 5037 scope.go:117] "RemoveContainer" containerID="4eb15cb32038efea094cfdd9de7d738b4b794156617ac3711e9211ae2164489c" Nov 26 14:45:47 crc kubenswrapper[5037]: I1126 14:45:47.369548 5037 scope.go:117] "RemoveContainer" containerID="0dd95fb86f5c9c7f2c0e3579b779a59b83c93b72c6f6bf8325a47fc2e45d68b7" Nov 26 14:45:47 crc kubenswrapper[5037]: I1126 14:45:47.403830 5037 scope.go:117] "RemoveContainer" containerID="cacc6b734cc7e2bff01ef129c3b33307509445bfd203135818102bd120e021b4" Nov 26 14:45:47 crc kubenswrapper[5037]: I1126 14:45:47.434806 5037 scope.go:117] "RemoveContainer" containerID="21def421ecbae44cdacf0b7d1286303125364d4d05b4ade8922e55bf4ed25ba9" Nov 26 14:45:47 crc kubenswrapper[5037]: I1126 14:45:47.494071 5037 scope.go:117] "RemoveContainer" containerID="462372bb6dc3a0113047a1731960cccabf72571beca4065fd8e4365e72f78a6d" Nov 26 14:45:47 crc kubenswrapper[5037]: I1126 14:45:47.589045 5037 scope.go:117] "RemoveContainer" 
containerID="86b088378442a6bc7aaa68a43a1c6be73e43cab073b7fe25ffe042a8a2402305" Nov 26 14:45:50 crc kubenswrapper[5037]: I1126 14:45:50.909324 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181" Nov 26 14:45:51 crc kubenswrapper[5037]: I1126 14:45:51.679547 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"86f6ce2549760d5fe019e1ea055aeb6c0bac934ae51bf431e44599a4a8c9c0d2"} Nov 26 14:45:56 crc kubenswrapper[5037]: I1126 14:45:56.644858 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cslxk" Nov 26 14:45:56 crc kubenswrapper[5037]: I1126 14:45:56.715058 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cslxk"] Nov 26 14:45:56 crc kubenswrapper[5037]: I1126 14:45:56.773273 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pfb76"] Nov 26 14:45:56 crc kubenswrapper[5037]: I1126 14:45:56.773579 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pfb76" podUID="a666cf42-14be-48a6-825d-65f7888c45a5" containerName="registry-server" containerID="cri-o://a75c4fb3856dbec91d253de1eae25a260f371952bac3015c5884f6d4f35eda6a" gracePeriod=2 Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.259722 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pfb76" Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.379989 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a666cf42-14be-48a6-825d-65f7888c45a5-utilities\") pod \"a666cf42-14be-48a6-825d-65f7888c45a5\" (UID: \"a666cf42-14be-48a6-825d-65f7888c45a5\") " Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.380144 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjlj5\" (UniqueName: \"kubernetes.io/projected/a666cf42-14be-48a6-825d-65f7888c45a5-kube-api-access-cjlj5\") pod \"a666cf42-14be-48a6-825d-65f7888c45a5\" (UID: \"a666cf42-14be-48a6-825d-65f7888c45a5\") " Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.380204 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a666cf42-14be-48a6-825d-65f7888c45a5-catalog-content\") pod \"a666cf42-14be-48a6-825d-65f7888c45a5\" (UID: \"a666cf42-14be-48a6-825d-65f7888c45a5\") " Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.380491 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a666cf42-14be-48a6-825d-65f7888c45a5-utilities" (OuterVolumeSpecName: "utilities") pod "a666cf42-14be-48a6-825d-65f7888c45a5" (UID: "a666cf42-14be-48a6-825d-65f7888c45a5"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.395583 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a666cf42-14be-48a6-825d-65f7888c45a5-kube-api-access-cjlj5" (OuterVolumeSpecName: "kube-api-access-cjlj5") pod "a666cf42-14be-48a6-825d-65f7888c45a5" (UID: "a666cf42-14be-48a6-825d-65f7888c45a5"). InnerVolumeSpecName "kube-api-access-cjlj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.418668 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a666cf42-14be-48a6-825d-65f7888c45a5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a666cf42-14be-48a6-825d-65f7888c45a5" (UID: "a666cf42-14be-48a6-825d-65f7888c45a5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.482458 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjlj5\" (UniqueName: \"kubernetes.io/projected/a666cf42-14be-48a6-825d-65f7888c45a5-kube-api-access-cjlj5\") on node \"crc\" DevicePath \"\"" Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.482495 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a666cf42-14be-48a6-825d-65f7888c45a5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.482506 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a666cf42-14be-48a6-825d-65f7888c45a5-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.731472 5037 generic.go:334] "Generic (PLEG): container finished" podID="a666cf42-14be-48a6-825d-65f7888c45a5" containerID="a75c4fb3856dbec91d253de1eae25a260f371952bac3015c5884f6d4f35eda6a" exitCode=0 Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.731516 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfb76" event={"ID":"a666cf42-14be-48a6-825d-65f7888c45a5","Type":"ContainerDied","Data":"a75c4fb3856dbec91d253de1eae25a260f371952bac3015c5884f6d4f35eda6a"} Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.731528 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pfb76"
Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.731540 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfb76" event={"ID":"a666cf42-14be-48a6-825d-65f7888c45a5","Type":"ContainerDied","Data":"69b407d544bdd9436549d9139e679f59935e44eb2ba260999cf6bdb47315b7cd"}
Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.731571 5037 scope.go:117] "RemoveContainer" containerID="a75c4fb3856dbec91d253de1eae25a260f371952bac3015c5884f6d4f35eda6a"
Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.756132 5037 scope.go:117] "RemoveContainer" containerID="55e0803fe9e66245c36e5bdc9fe525dba7c395475a45a4c9b3bae96c9c6b4e75"
Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.764715 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pfb76"]
Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.782120 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pfb76"]
Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.791592 5037 scope.go:117] "RemoveContainer" containerID="e33565215826889ff2c2181ed758cc93e518b6c7a24817da7374e4daf12e5266"
Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.814691 5037 scope.go:117] "RemoveContainer" containerID="a75c4fb3856dbec91d253de1eae25a260f371952bac3015c5884f6d4f35eda6a"
Nov 26 14:45:57 crc kubenswrapper[5037]: E1126 14:45:57.815407 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a75c4fb3856dbec91d253de1eae25a260f371952bac3015c5884f6d4f35eda6a\": container with ID starting with a75c4fb3856dbec91d253de1eae25a260f371952bac3015c5884f6d4f35eda6a not found: ID does not exist" containerID="a75c4fb3856dbec91d253de1eae25a260f371952bac3015c5884f6d4f35eda6a"
Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.815472 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a75c4fb3856dbec91d253de1eae25a260f371952bac3015c5884f6d4f35eda6a"} err="failed to get container status \"a75c4fb3856dbec91d253de1eae25a260f371952bac3015c5884f6d4f35eda6a\": rpc error: code = NotFound desc = could not find container \"a75c4fb3856dbec91d253de1eae25a260f371952bac3015c5884f6d4f35eda6a\": container with ID starting with a75c4fb3856dbec91d253de1eae25a260f371952bac3015c5884f6d4f35eda6a not found: ID does not exist"
Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.815505 5037 scope.go:117] "RemoveContainer" containerID="55e0803fe9e66245c36e5bdc9fe525dba7c395475a45a4c9b3bae96c9c6b4e75"
Nov 26 14:45:57 crc kubenswrapper[5037]: E1126 14:45:57.815877 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55e0803fe9e66245c36e5bdc9fe525dba7c395475a45a4c9b3bae96c9c6b4e75\": container with ID starting with 55e0803fe9e66245c36e5bdc9fe525dba7c395475a45a4c9b3bae96c9c6b4e75 not found: ID does not exist" containerID="55e0803fe9e66245c36e5bdc9fe525dba7c395475a45a4c9b3bae96c9c6b4e75"
Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.815936 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55e0803fe9e66245c36e5bdc9fe525dba7c395475a45a4c9b3bae96c9c6b4e75"} err="failed to get container status \"55e0803fe9e66245c36e5bdc9fe525dba7c395475a45a4c9b3bae96c9c6b4e75\": rpc error: code = NotFound desc = could not find container \"55e0803fe9e66245c36e5bdc9fe525dba7c395475a45a4c9b3bae96c9c6b4e75\": container with ID starting with 55e0803fe9e66245c36e5bdc9fe525dba7c395475a45a4c9b3bae96c9c6b4e75 not found: ID does not exist"
Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.815972 5037 scope.go:117] "RemoveContainer" containerID="e33565215826889ff2c2181ed758cc93e518b6c7a24817da7374e4daf12e5266"
Nov 26 14:45:57 crc kubenswrapper[5037]: E1126 14:45:57.816361 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e33565215826889ff2c2181ed758cc93e518b6c7a24817da7374e4daf12e5266\": container with ID starting with e33565215826889ff2c2181ed758cc93e518b6c7a24817da7374e4daf12e5266 not found: ID does not exist" containerID="e33565215826889ff2c2181ed758cc93e518b6c7a24817da7374e4daf12e5266"
Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.816392 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e33565215826889ff2c2181ed758cc93e518b6c7a24817da7374e4daf12e5266"} err="failed to get container status \"e33565215826889ff2c2181ed758cc93e518b6c7a24817da7374e4daf12e5266\": rpc error: code = NotFound desc = could not find container \"e33565215826889ff2c2181ed758cc93e518b6c7a24817da7374e4daf12e5266\": container with ID starting with e33565215826889ff2c2181ed758cc93e518b6c7a24817da7374e4daf12e5266 not found: ID does not exist"
Nov 26 14:45:57 crc kubenswrapper[5037]: I1126 14:45:57.916175 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a666cf42-14be-48a6-825d-65f7888c45a5" path="/var/lib/kubelet/pods/a666cf42-14be-48a6-825d-65f7888c45a5/volumes"
Nov 26 14:46:33 crc kubenswrapper[5037]: I1126 14:46:33.854895 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-d5cjp"]
Nov 26 14:46:33 crc kubenswrapper[5037]: E1126 14:46:33.855819 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a666cf42-14be-48a6-825d-65f7888c45a5" containerName="extract-content"
Nov 26 14:46:33 crc kubenswrapper[5037]: I1126 14:46:33.855838 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="a666cf42-14be-48a6-825d-65f7888c45a5" containerName="extract-content"
Nov 26 14:46:33 crc kubenswrapper[5037]: E1126 14:46:33.855860 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca3683eb-a8e5-4897-a94d-91cb2601fbb5" containerName="extract-content"
Nov 26 14:46:33 crc kubenswrapper[5037]: I1126 14:46:33.855868 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca3683eb-a8e5-4897-a94d-91cb2601fbb5" containerName="extract-content"
Nov 26 14:46:33 crc kubenswrapper[5037]: E1126 14:46:33.855888 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a666cf42-14be-48a6-825d-65f7888c45a5" containerName="extract-utilities"
Nov 26 14:46:33 crc kubenswrapper[5037]: I1126 14:46:33.855898 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="a666cf42-14be-48a6-825d-65f7888c45a5" containerName="extract-utilities"
Nov 26 14:46:33 crc kubenswrapper[5037]: E1126 14:46:33.855919 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca3683eb-a8e5-4897-a94d-91cb2601fbb5" containerName="extract-utilities"
Nov 26 14:46:33 crc kubenswrapper[5037]: I1126 14:46:33.855927 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca3683eb-a8e5-4897-a94d-91cb2601fbb5" containerName="extract-utilities"
Nov 26 14:46:33 crc kubenswrapper[5037]: E1126 14:46:33.855939 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca3683eb-a8e5-4897-a94d-91cb2601fbb5" containerName="registry-server"
Nov 26 14:46:33 crc kubenswrapper[5037]: I1126 14:46:33.855947 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca3683eb-a8e5-4897-a94d-91cb2601fbb5" containerName="registry-server"
Nov 26 14:46:33 crc kubenswrapper[5037]: E1126 14:46:33.855971 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a666cf42-14be-48a6-825d-65f7888c45a5" containerName="registry-server"
Nov 26 14:46:33 crc kubenswrapper[5037]: I1126 14:46:33.855979 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="a666cf42-14be-48a6-825d-65f7888c45a5" containerName="registry-server"
Nov 26 14:46:33 crc kubenswrapper[5037]: I1126 14:46:33.856177 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="a666cf42-14be-48a6-825d-65f7888c45a5" containerName="registry-server"
Nov 26 14:46:33 crc kubenswrapper[5037]: I1126 14:46:33.856194 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca3683eb-a8e5-4897-a94d-91cb2601fbb5" containerName="registry-server"
Nov 26 14:46:33 crc kubenswrapper[5037]: I1126 14:46:33.857470 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:33 crc kubenswrapper[5037]: I1126 14:46:33.872235 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d5cjp"]
Nov 26 14:46:34 crc kubenswrapper[5037]: I1126 14:46:34.024805 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtbh6\" (UniqueName: \"kubernetes.io/projected/9b6b2ab4-dfd0-423d-a160-815568f7deb7-kube-api-access-gtbh6\") pod \"community-operators-d5cjp\" (UID: \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\") " pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:34 crc kubenswrapper[5037]: I1126 14:46:34.024871 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b6b2ab4-dfd0-423d-a160-815568f7deb7-catalog-content\") pod \"community-operators-d5cjp\" (UID: \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\") " pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:34 crc kubenswrapper[5037]: I1126 14:46:34.024902 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b6b2ab4-dfd0-423d-a160-815568f7deb7-utilities\") pod \"community-operators-d5cjp\" (UID: \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\") " pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:34 crc kubenswrapper[5037]: I1126 14:46:34.126576 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b6b2ab4-dfd0-423d-a160-815568f7deb7-catalog-content\") pod \"community-operators-d5cjp\" (UID: \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\") " pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:34 crc kubenswrapper[5037]: I1126 14:46:34.126646 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b6b2ab4-dfd0-423d-a160-815568f7deb7-utilities\") pod \"community-operators-d5cjp\" (UID: \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\") " pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:34 crc kubenswrapper[5037]: I1126 14:46:34.126741 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtbh6\" (UniqueName: \"kubernetes.io/projected/9b6b2ab4-dfd0-423d-a160-815568f7deb7-kube-api-access-gtbh6\") pod \"community-operators-d5cjp\" (UID: \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\") " pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:34 crc kubenswrapper[5037]: I1126 14:46:34.127101 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b6b2ab4-dfd0-423d-a160-815568f7deb7-catalog-content\") pod \"community-operators-d5cjp\" (UID: \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\") " pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:34 crc kubenswrapper[5037]: I1126 14:46:34.127216 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b6b2ab4-dfd0-423d-a160-815568f7deb7-utilities\") pod \"community-operators-d5cjp\" (UID: \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\") " pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:34 crc kubenswrapper[5037]: I1126 14:46:34.146799 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtbh6\" (UniqueName: \"kubernetes.io/projected/9b6b2ab4-dfd0-423d-a160-815568f7deb7-kube-api-access-gtbh6\") pod \"community-operators-d5cjp\" (UID: \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\") " pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:34 crc kubenswrapper[5037]: I1126 14:46:34.189437 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:34 crc kubenswrapper[5037]: I1126 14:46:34.675665 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d5cjp"]
Nov 26 14:46:34 crc kubenswrapper[5037]: W1126 14:46:34.682930 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9b6b2ab4_dfd0_423d_a160_815568f7deb7.slice/crio-051796db30dc5ce4854faf85ce0d31ef602a367e0cadec1f8376bd5c1f92d799 WatchSource:0}: Error finding container 051796db30dc5ce4854faf85ce0d31ef602a367e0cadec1f8376bd5c1f92d799: Status 404 returned error can't find the container with id 051796db30dc5ce4854faf85ce0d31ef602a367e0cadec1f8376bd5c1f92d799
Nov 26 14:46:35 crc kubenswrapper[5037]: I1126 14:46:35.103838 5037 generic.go:334] "Generic (PLEG): container finished" podID="9b6b2ab4-dfd0-423d-a160-815568f7deb7" containerID="b5af5f6d4d36632a7b9b9e1b0a9a0c54131941d04ca5dd5c59c7197ef46890dd" exitCode=0
Nov 26 14:46:35 crc kubenswrapper[5037]: I1126 14:46:35.103896 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d5cjp" event={"ID":"9b6b2ab4-dfd0-423d-a160-815568f7deb7","Type":"ContainerDied","Data":"b5af5f6d4d36632a7b9b9e1b0a9a0c54131941d04ca5dd5c59c7197ef46890dd"}
Nov 26 14:46:35 crc kubenswrapper[5037]: I1126 14:46:35.103925 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d5cjp" event={"ID":"9b6b2ab4-dfd0-423d-a160-815568f7deb7","Type":"ContainerStarted","Data":"051796db30dc5ce4854faf85ce0d31ef602a367e0cadec1f8376bd5c1f92d799"}
Nov 26 14:46:37 crc kubenswrapper[5037]: I1126 14:46:37.121179 5037 generic.go:334] "Generic (PLEG): container finished" podID="9b6b2ab4-dfd0-423d-a160-815568f7deb7" containerID="69ccddabd49089619eed2d3aed496ca9bbc7582a6ae223ea331952cb326e744d" exitCode=0
Nov 26 14:46:37 crc kubenswrapper[5037]: I1126 14:46:37.121233 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d5cjp" event={"ID":"9b6b2ab4-dfd0-423d-a160-815568f7deb7","Type":"ContainerDied","Data":"69ccddabd49089619eed2d3aed496ca9bbc7582a6ae223ea331952cb326e744d"}
Nov 26 14:46:38 crc kubenswrapper[5037]: I1126 14:46:38.130951 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d5cjp" event={"ID":"9b6b2ab4-dfd0-423d-a160-815568f7deb7","Type":"ContainerStarted","Data":"be777518e110bdfbd8581c25795fada7e06c1bf23caae2228a8ac2d5a733a33e"}
Nov 26 14:46:38 crc kubenswrapper[5037]: I1126 14:46:38.157103 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-d5cjp" podStartSLOduration=2.495342221 podStartE2EDuration="5.157065095s" podCreationTimestamp="2025-11-26 14:46:33 +0000 UTC" firstStartedPulling="2025-11-26 14:46:35.106118634 +0000 UTC m=+1861.902888838" lastFinishedPulling="2025-11-26 14:46:37.767841528 +0000 UTC m=+1864.564611712" observedRunningTime="2025-11-26 14:46:38.150575167 +0000 UTC m=+1864.947345341" watchObservedRunningTime="2025-11-26 14:46:38.157065095 +0000 UTC m=+1864.953835329"
Nov 26 14:46:44 crc kubenswrapper[5037]: I1126 14:46:44.190321 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:44 crc kubenswrapper[5037]: I1126 14:46:44.190753 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:44 crc kubenswrapper[5037]: I1126 14:46:44.260228 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:45 crc kubenswrapper[5037]: I1126 14:46:45.241170 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:45 crc kubenswrapper[5037]: I1126 14:46:45.297202 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d5cjp"]
Nov 26 14:46:47 crc kubenswrapper[5037]: I1126 14:46:47.210786 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-d5cjp" podUID="9b6b2ab4-dfd0-423d-a160-815568f7deb7" containerName="registry-server" containerID="cri-o://be777518e110bdfbd8581c25795fada7e06c1bf23caae2228a8ac2d5a733a33e" gracePeriod=2
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.046246 5037 scope.go:117] "RemoveContainer" containerID="5ca9c40161d4eda7b873f1e18d11190b089950565570999848569efa39f20890"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.088875 5037 scope.go:117] "RemoveContainer" containerID="da1a1cd459be2e9979d9f0c94ca48d383fd106014f27219fe0eb565893f80f4f"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.121708 5037 scope.go:117] "RemoveContainer" containerID="cc6733dda97a3c0d2ee7075a71f1b7520ad8f6875df7af6d1a7a9513450a7825"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.156334 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.184844 5037 scope.go:117] "RemoveContainer" containerID="941fc9a3959ee8002aab79da103b04201cca95b57887065e0bd2e9bc035ac27b"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.211981 5037 scope.go:117] "RemoveContainer" containerID="054c2f78fc498fecc3b64d7998923b3399f2ef4cd85d88ab4a737286fbb32ff5"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.221325 5037 generic.go:334] "Generic (PLEG): container finished" podID="9b6b2ab4-dfd0-423d-a160-815568f7deb7" containerID="be777518e110bdfbd8581c25795fada7e06c1bf23caae2228a8ac2d5a733a33e" exitCode=0
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.221412 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d5cjp" event={"ID":"9b6b2ab4-dfd0-423d-a160-815568f7deb7","Type":"ContainerDied","Data":"be777518e110bdfbd8581c25795fada7e06c1bf23caae2228a8ac2d5a733a33e"}
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.221440 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d5cjp" event={"ID":"9b6b2ab4-dfd0-423d-a160-815568f7deb7","Type":"ContainerDied","Data":"051796db30dc5ce4854faf85ce0d31ef602a367e0cadec1f8376bd5c1f92d799"}
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.221483 5037 scope.go:117] "RemoveContainer" containerID="be777518e110bdfbd8581c25795fada7e06c1bf23caae2228a8ac2d5a733a33e"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.221648 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d5cjp"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.245105 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b6b2ab4-dfd0-423d-a160-815568f7deb7-catalog-content\") pod \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\" (UID: \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\") "
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.245220 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b6b2ab4-dfd0-423d-a160-815568f7deb7-utilities\") pod \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\" (UID: \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\") "
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.245262 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtbh6\" (UniqueName: \"kubernetes.io/projected/9b6b2ab4-dfd0-423d-a160-815568f7deb7-kube-api-access-gtbh6\") pod \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\" (UID: \"9b6b2ab4-dfd0-423d-a160-815568f7deb7\") "
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.247222 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b6b2ab4-dfd0-423d-a160-815568f7deb7-utilities" (OuterVolumeSpecName: "utilities") pod "9b6b2ab4-dfd0-423d-a160-815568f7deb7" (UID: "9b6b2ab4-dfd0-423d-a160-815568f7deb7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.248638 5037 scope.go:117] "RemoveContainer" containerID="3a6b281560c42c3b10b3817d2b216fece39436338f5a606eb423a1727282ed62"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.251261 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b6b2ab4-dfd0-423d-a160-815568f7deb7-kube-api-access-gtbh6" (OuterVolumeSpecName: "kube-api-access-gtbh6") pod "9b6b2ab4-dfd0-423d-a160-815568f7deb7" (UID: "9b6b2ab4-dfd0-423d-a160-815568f7deb7"). InnerVolumeSpecName "kube-api-access-gtbh6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.261211 5037 scope.go:117] "RemoveContainer" containerID="69ccddabd49089619eed2d3aed496ca9bbc7582a6ae223ea331952cb326e744d"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.281455 5037 scope.go:117] "RemoveContainer" containerID="28fcb153066df814b94c4891251d6d55dbcb4934ca8da295cafa2d5cddb4b9cf"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.298884 5037 scope.go:117] "RemoveContainer" containerID="b5af5f6d4d36632a7b9b9e1b0a9a0c54131941d04ca5dd5c59c7197ef46890dd"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.302534 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b6b2ab4-dfd0-423d-a160-815568f7deb7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9b6b2ab4-dfd0-423d-a160-815568f7deb7" (UID: "9b6b2ab4-dfd0-423d-a160-815568f7deb7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.330475 5037 scope.go:117] "RemoveContainer" containerID="7acd7256521600da6ac1e6813aa3e9cf78cfc3cd65ec85f50d2f45137be61eac"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.330541 5037 scope.go:117] "RemoveContainer" containerID="be777518e110bdfbd8581c25795fada7e06c1bf23caae2228a8ac2d5a733a33e"
Nov 26 14:46:48 crc kubenswrapper[5037]: E1126 14:46:48.331484 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be777518e110bdfbd8581c25795fada7e06c1bf23caae2228a8ac2d5a733a33e\": container with ID starting with be777518e110bdfbd8581c25795fada7e06c1bf23caae2228a8ac2d5a733a33e not found: ID does not exist" containerID="be777518e110bdfbd8581c25795fada7e06c1bf23caae2228a8ac2d5a733a33e"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.331541 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be777518e110bdfbd8581c25795fada7e06c1bf23caae2228a8ac2d5a733a33e"} err="failed to get container status \"be777518e110bdfbd8581c25795fada7e06c1bf23caae2228a8ac2d5a733a33e\": rpc error: code = NotFound desc = could not find container \"be777518e110bdfbd8581c25795fada7e06c1bf23caae2228a8ac2d5a733a33e\": container with ID starting with be777518e110bdfbd8581c25795fada7e06c1bf23caae2228a8ac2d5a733a33e not found: ID does not exist"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.331576 5037 scope.go:117] "RemoveContainer" containerID="69ccddabd49089619eed2d3aed496ca9bbc7582a6ae223ea331952cb326e744d"
Nov 26 14:46:48 crc kubenswrapper[5037]: E1126 14:46:48.331946 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69ccddabd49089619eed2d3aed496ca9bbc7582a6ae223ea331952cb326e744d\": container with ID starting with 69ccddabd49089619eed2d3aed496ca9bbc7582a6ae223ea331952cb326e744d not found: ID does not exist" containerID="69ccddabd49089619eed2d3aed496ca9bbc7582a6ae223ea331952cb326e744d"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.331970 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69ccddabd49089619eed2d3aed496ca9bbc7582a6ae223ea331952cb326e744d"} err="failed to get container status \"69ccddabd49089619eed2d3aed496ca9bbc7582a6ae223ea331952cb326e744d\": rpc error: code = NotFound desc = could not find container \"69ccddabd49089619eed2d3aed496ca9bbc7582a6ae223ea331952cb326e744d\": container with ID starting with 69ccddabd49089619eed2d3aed496ca9bbc7582a6ae223ea331952cb326e744d not found: ID does not exist"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.331992 5037 scope.go:117] "RemoveContainer" containerID="b5af5f6d4d36632a7b9b9e1b0a9a0c54131941d04ca5dd5c59c7197ef46890dd"
Nov 26 14:46:48 crc kubenswrapper[5037]: E1126 14:46:48.332400 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5af5f6d4d36632a7b9b9e1b0a9a0c54131941d04ca5dd5c59c7197ef46890dd\": container with ID starting with b5af5f6d4d36632a7b9b9e1b0a9a0c54131941d04ca5dd5c59c7197ef46890dd not found: ID does not exist" containerID="b5af5f6d4d36632a7b9b9e1b0a9a0c54131941d04ca5dd5c59c7197ef46890dd"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.332420 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5af5f6d4d36632a7b9b9e1b0a9a0c54131941d04ca5dd5c59c7197ef46890dd"} err="failed to get container status \"b5af5f6d4d36632a7b9b9e1b0a9a0c54131941d04ca5dd5c59c7197ef46890dd\": rpc error: code = NotFound desc = could not find container \"b5af5f6d4d36632a7b9b9e1b0a9a0c54131941d04ca5dd5c59c7197ef46890dd\": container with ID starting with b5af5f6d4d36632a7b9b9e1b0a9a0c54131941d04ca5dd5c59c7197ef46890dd not found: ID does not exist"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.357476 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b6b2ab4-dfd0-423d-a160-815568f7deb7-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.357666 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b6b2ab4-dfd0-423d-a160-815568f7deb7-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.357760 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtbh6\" (UniqueName: \"kubernetes.io/projected/9b6b2ab4-dfd0-423d-a160-815568f7deb7-kube-api-access-gtbh6\") on node \"crc\" DevicePath \"\""
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.388639 5037 scope.go:117] "RemoveContainer" containerID="14aebf93316a419a26ed225520cf8463cecfed50b9d7d41f74af43e5f4d6d686"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.407794 5037 scope.go:117] "RemoveContainer" containerID="08aa4f4dbe17185b559c1307060da7ba09ed7694916c81cee021536293b3f886"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.466490 5037 scope.go:117] "RemoveContainer" containerID="9e5f3c5c90e9812c570ac0055351a47f991610d4575a96e8c965dcfe4537a190"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.529477 5037 scope.go:117] "RemoveContainer" containerID="fc5934dda46def196ac4d0b4a908e9d8983ca4256b62c09dea1b070f18a57463"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.571077 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d5cjp"]
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.577548 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-d5cjp"]
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.593760 5037 scope.go:117] "RemoveContainer" containerID="012cb3b03b0ecb9c337fe08575cd3c4a80bdc4a9dd8c07213bf0de31477aa103"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.610830 5037 scope.go:117] "RemoveContainer" containerID="88558c083c5cd020dbbbc7911d8c1ff0846d988d99c33563252e02c9bde2f0cf"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.632374 5037 scope.go:117] "RemoveContainer" containerID="cfd4a00f08204fabb6bb9f632ea7584c7a2da1a6a0424ec85b2ab6cdf18489eb"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.659793 5037 scope.go:117] "RemoveContainer" containerID="c290f83dd69d9c3a68537baf518675b47c29941befa9a3c4b26cc953e62cd55a"
Nov 26 14:46:48 crc kubenswrapper[5037]: I1126 14:46:48.687118 5037 scope.go:117] "RemoveContainer" containerID="743bc6d2ce27587012763710fbf1732a386ed06688e5f64f97de51cecbdae318"
Nov 26 14:46:49 crc kubenswrapper[5037]: I1126 14:46:49.923863 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b6b2ab4-dfd0-423d-a160-815568f7deb7" path="/var/lib/kubelet/pods/9b6b2ab4-dfd0-423d-a160-815568f7deb7/volumes"
Nov 26 14:47:48 crc kubenswrapper[5037]: I1126 14:47:48.920727 5037 scope.go:117] "RemoveContainer" containerID="ce804c960eacc58c7f42b83f833dff6342613db249b38fc647fe7cedf8112368"
Nov 26 14:47:48 crc kubenswrapper[5037]: I1126 14:47:48.954099 5037 scope.go:117] "RemoveContainer" containerID="b4375d3a48b5220ef835c2698b095719bcb6787ea1adbe7ef2c0d8398408bf27"
Nov 26 14:47:48 crc kubenswrapper[5037]: I1126 14:47:48.991155 5037 scope.go:117] "RemoveContainer" containerID="438c6201d6b1523ba7fbc43efafce89a346d81d24d2c43b9d63c88e3e6ba3ae5"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.024006 5037 scope.go:117] "RemoveContainer" containerID="6c728b7a4bd6db17ff62032233cd9d220168f2c76bace60a7590b7b669f9d433"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.055535 5037 scope.go:117] "RemoveContainer" containerID="78b901dad012794cdf465b35ea24402937fd90a8cdaac5e8afdb3f90cb70b5cb"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.085455 5037 scope.go:117] "RemoveContainer" containerID="94d7a45db71326af06b4b972ad67c72425bf8cc0bf7bea66342eaa2f1e0e7d7b"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.120474 5037 scope.go:117] "RemoveContainer" containerID="1a37e041cfbebe66b77bc0f18efdcf4b5deb2e9cf0cd0c8a577ccb16cb667c59"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.160209 5037 scope.go:117] "RemoveContainer" containerID="1ccf73ea43e62a2d000418194aef023e26ee721280485b1329df2b411c630259"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.193431 5037 scope.go:117] "RemoveContainer" containerID="6d347ebb668ec0c1cc7aacba7d11563feb28e307e522d49a22909a1bea1debad"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.217115 5037 scope.go:117] "RemoveContainer" containerID="806277a00ac5191908812744ccfa1f0988a36d9c73183097577b0ffa8fa3e35b"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.253184 5037 scope.go:117] "RemoveContainer" containerID="b49e79776c4c1720c6692646b9ec69e22007400cd025b41070ff9a874d805f29"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.286346 5037 scope.go:117] "RemoveContainer" containerID="6c5a997620846ace07292574a12b8476eab5ce27fac1929eff03c5cf4273334d"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.314648 5037 scope.go:117] "RemoveContainer" containerID="6161ba904919c36fc996416a539e7b9532a492480ed49709a6366bb545af200e"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.352889 5037 scope.go:117] "RemoveContainer" containerID="7479fe1b8683d8b8da52186ee20697aea76b2ba23bac017886f41f497a93218e"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.395013 5037 scope.go:117] "RemoveContainer" containerID="4dc64289955c86f3b4c372cac08b43c9e8623cbbe4573778b4e74b03bcc33650"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.437002 5037 scope.go:117] "RemoveContainer" containerID="69f2b0b56cf2f3be40f2a859173b22b913b09b1aec2185348206ae5ef68d4747"
Nov 26 14:47:49 crc kubenswrapper[5037]: I1126 14:47:49.493166 5037 scope.go:117] "RemoveContainer" containerID="cf8bcb3dd095d034ddca525521c17df5621fc3fb75c9a732ffd5638bbb28d0ed"
Nov 26 14:48:11 crc kubenswrapper[5037]: I1126 14:48:11.247734 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:48:11 crc kubenswrapper[5037]: I1126 14:48:11.248603 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 14:48:41 crc kubenswrapper[5037]: I1126 14:48:41.247625 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:48:41 crc kubenswrapper[5037]: I1126 14:48:41.250032 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 14:48:49 crc kubenswrapper[5037]: I1126 14:48:49.776138 5037 scope.go:117] "RemoveContainer" containerID="d4490cdc702eb284fb560c1fb61da2d26b6df8afaad2620c0681ff6b028fa449"
Nov 26 14:48:49 crc kubenswrapper[5037]: I1126 14:48:49.802349 5037 scope.go:117] "RemoveContainer" containerID="181d04aaba3a5e12256cd74fb1a24c1fbeb6893273bb7e2b686e760d59cbdfc5"
Nov 26 14:48:49 crc kubenswrapper[5037]: I1126 14:48:49.857673 5037 scope.go:117] "RemoveContainer" containerID="f7a0d1f48aae5151f234d8d12216b9455594b21fa7a2b6f0992c396c0d1110d5"
Nov 26 14:48:49 crc kubenswrapper[5037]: I1126 14:48:49.878208 5037 scope.go:117] "RemoveContainer" containerID="636473aba00f3530f0263e12d2e4b1c92a75ffc63db3c24266a183bce9daaabe"
Nov 26 14:49:11 crc kubenswrapper[5037]: I1126 14:49:11.246869 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:49:11 crc kubenswrapper[5037]: I1126 14:49:11.247858 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 14:49:11 crc kubenswrapper[5037]: I1126 14:49:11.247938 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d"
Nov 26 14:49:11 crc kubenswrapper[5037]: I1126 14:49:11.248955 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"86f6ce2549760d5fe019e1ea055aeb6c0bac934ae51bf431e44599a4a8c9c0d2"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 14:49:11 crc kubenswrapper[5037]: I1126 14:49:11.249067 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://86f6ce2549760d5fe019e1ea055aeb6c0bac934ae51bf431e44599a4a8c9c0d2" gracePeriod=600
Nov 26 14:49:11 crc kubenswrapper[5037]: E1126 14:49:11.419295 5037 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8bbdf8d8_f2ed_4b76_929a_a1a6c07e85fb.slice/crio-86f6ce2549760d5fe019e1ea055aeb6c0bac934ae51bf431e44599a4a8c9c0d2.scope\": RecentStats: unable to find data in memory cache]"
Nov 26 14:49:11 crc kubenswrapper[5037]: I1126 14:49:11.671094 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"86f6ce2549760d5fe019e1ea055aeb6c0bac934ae51bf431e44599a4a8c9c0d2"}
Nov 26 14:49:11 crc kubenswrapper[5037]: I1126 14:49:11.671089 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="86f6ce2549760d5fe019e1ea055aeb6c0bac934ae51bf431e44599a4a8c9c0d2" exitCode=0
Nov 26 14:49:11 crc kubenswrapper[5037]: I1126 14:49:11.671628 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"}
Nov 26 14:49:11 crc kubenswrapper[5037]: I1126 14:49:11.671564 5037 scope.go:117] "RemoveContainer" containerID="5e69d7717514aa68d798cc4f8eee9b2d5d3e9666ca3b110c2cb4c6b90f9e1181"
Nov 26 14:49:49 crc kubenswrapper[5037]: I1126 14:49:49.978857 5037 scope.go:117] "RemoveContainer" containerID="ed4a7f81c0c4d4bec4821337a11a3efc648b67a00e6372d401c2ff6c7c2b75e3"
Nov 26 14:49:50 crc kubenswrapper[5037]: I1126 14:49:50.025817 5037 scope.go:117] "RemoveContainer" containerID="33dcd0b34b2f2fdf22fdb535aa2524ac7c392d11aebfe3891b1a520355c97e29"
Nov 26 14:49:50 crc kubenswrapper[5037]: I1126 14:49:50.045276 5037 scope.go:117] "RemoveContainer" containerID="d79abe361aef8985708638422b648d6c91d88cc2db1ffbf2d1c043eb4548ba88"
Nov 26 14:50:50 crc kubenswrapper[5037]: I1126 14:50:50.107319 5037 scope.go:117] "RemoveContainer" containerID="0cbfde75083001a5dadd341f77eb28cdbbc9bc953e4de5164a5c584470961385"
Nov 26 14:50:50 crc kubenswrapper[5037]: I1126 14:50:50.125116 5037 scope.go:117] "RemoveContainer" containerID="5edb9e133e8d96c8435dd93ddc03dd31254084b4ce585ca69545dc07a5d56468"
Nov 26 14:50:50 crc kubenswrapper[5037]: I1126 14:50:50.154297 5037 scope.go:117] "RemoveContainer" containerID="3343fff4b72744dd55e8f3db651a2d5ac305f3aa671a086e0c10622269957929"
Nov 26 14:50:50 crc kubenswrapper[5037]: I1126 14:50:50.177079 5037 scope.go:117] "RemoveContainer" containerID="d1739e90ae050e6e31c02bc769bc3e3f53dba3dae117e4ebfd3699fc77b04e33"
Nov 26 14:50:50 crc kubenswrapper[5037]: I1126 14:50:50.203033 5037 scope.go:117] "RemoveContainer" containerID="a374ebe6c9d1ff0211c156c2166fd3bbda729a88ddf0dea44c5645e9f9f331e9"
Nov 26 14:51:11 crc kubenswrapper[5037]: I1126 14:51:11.247157 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:51:11 crc kubenswrapper[5037]: I1126 14:51:11.247868 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 14:51:41 crc kubenswrapper[5037]: I1126 14:51:41.247915 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:51:41 crc kubenswrapper[5037]: I1126 14:51:41.248533 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 14:52:11 crc kubenswrapper[5037]: I1126 14:52:11.247098 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:52:11 crc kubenswrapper[5037]: I1126 14:52:11.247666 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 14:52:11 crc kubenswrapper[5037]: I1126 14:52:11.247713 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d"
Nov 26 14:52:11 crc kubenswrapper[5037]: I1126 14:52:11.248329 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 14:52:11 crc kubenswrapper[5037]: I1126 14:52:11.248379 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7" gracePeriod=600
Nov 26 14:52:11 crc kubenswrapper[5037]: E1126 14:52:11.438606 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:52:12 crc kubenswrapper[5037]: I1126 14:52:12.357117 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7" exitCode=0
Nov 26 14:52:12 crc kubenswrapper[5037]: I1126 14:52:12.357211 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"}
Nov 26 14:52:12 crc kubenswrapper[5037]: I1126 14:52:12.357339 5037 scope.go:117] "RemoveContainer" containerID="86f6ce2549760d5fe019e1ea055aeb6c0bac934ae51bf431e44599a4a8c9c0d2"
Nov 26 14:52:12 crc kubenswrapper[5037]: I1126 14:52:12.358463 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:52:12 crc kubenswrapper[5037]: E1126 14:52:12.358999 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:52:25 crc kubenswrapper[5037]: I1126 14:52:25.908420 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:52:25 crc kubenswrapper[5037]: E1126 14:52:25.909241 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:52:40 crc kubenswrapper[5037]: I1126 14:52:40.907758 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:52:40 crc kubenswrapper[5037]: E1126 14:52:40.908633 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:52:55 crc kubenswrapper[5037]: I1126 14:52:55.909229 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:52:55 crc kubenswrapper[5037]: E1126 14:52:55.910478 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:53:09 crc kubenswrapper[5037]: I1126 14:53:09.908518 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:53:09 crc kubenswrapper[5037]: E1126 14:53:09.909458 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:53:20 crc kubenswrapper[5037]: I1126 14:53:20.909188 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:53:20 crc kubenswrapper[5037]: E1126 14:53:20.910402 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:53:33 crc kubenswrapper[5037]: I1126 14:53:33.913361 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:53:33 crc kubenswrapper[5037]: E1126 14:53:33.915574 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.102744 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pjjj4"]
Nov 26 14:53:43 crc kubenswrapper[5037]: E1126 14:53:43.103658 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b6b2ab4-dfd0-423d-a160-815568f7deb7" containerName="extract-utilities"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.103675 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b6b2ab4-dfd0-423d-a160-815568f7deb7" containerName="extract-utilities"
Nov 26 14:53:43 crc kubenswrapper[5037]: E1126 14:53:43.103692 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b6b2ab4-dfd0-423d-a160-815568f7deb7" containerName="registry-server"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.103699 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b6b2ab4-dfd0-423d-a160-815568f7deb7" containerName="registry-server"
Nov 26 14:53:43 crc kubenswrapper[5037]: E1126 14:53:43.103729 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b6b2ab4-dfd0-423d-a160-815568f7deb7" containerName="extract-content"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.103737 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b6b2ab4-dfd0-423d-a160-815568f7deb7" containerName="extract-content"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.103898 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b6b2ab4-dfd0-423d-a160-815568f7deb7" containerName="registry-server"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.105046 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.121168 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pjjj4"]
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.224780 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88fl5\" (UniqueName: \"kubernetes.io/projected/82fa2a1e-7176-413f-9eb7-ad31f402d326-kube-api-access-88fl5\") pod \"redhat-operators-pjjj4\" (UID: \"82fa2a1e-7176-413f-9eb7-ad31f402d326\") " pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.224871 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82fa2a1e-7176-413f-9eb7-ad31f402d326-utilities\") pod \"redhat-operators-pjjj4\" (UID: \"82fa2a1e-7176-413f-9eb7-ad31f402d326\") " pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.224891 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82fa2a1e-7176-413f-9eb7-ad31f402d326-catalog-content\") pod \"redhat-operators-pjjj4\" (UID: \"82fa2a1e-7176-413f-9eb7-ad31f402d326\") " pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.326685 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88fl5\" (UniqueName: \"kubernetes.io/projected/82fa2a1e-7176-413f-9eb7-ad31f402d326-kube-api-access-88fl5\") pod \"redhat-operators-pjjj4\" (UID: \"82fa2a1e-7176-413f-9eb7-ad31f402d326\") " pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.326827 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82fa2a1e-7176-413f-9eb7-ad31f402d326-utilities\") pod \"redhat-operators-pjjj4\" (UID: \"82fa2a1e-7176-413f-9eb7-ad31f402d326\") " pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.326862 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82fa2a1e-7176-413f-9eb7-ad31f402d326-catalog-content\") pod \"redhat-operators-pjjj4\" (UID: \"82fa2a1e-7176-413f-9eb7-ad31f402d326\") " pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.327441 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82fa2a1e-7176-413f-9eb7-ad31f402d326-utilities\") pod \"redhat-operators-pjjj4\" (UID: \"82fa2a1e-7176-413f-9eb7-ad31f402d326\") " pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.327490 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82fa2a1e-7176-413f-9eb7-ad31f402d326-catalog-content\") pod \"redhat-operators-pjjj4\" (UID: \"82fa2a1e-7176-413f-9eb7-ad31f402d326\") " pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.352446 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88fl5\" (UniqueName: \"kubernetes.io/projected/82fa2a1e-7176-413f-9eb7-ad31f402d326-kube-api-access-88fl5\") pod \"redhat-operators-pjjj4\" (UID: \"82fa2a1e-7176-413f-9eb7-ad31f402d326\") " pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.441577 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:53:43 crc kubenswrapper[5037]: I1126 14:53:43.663967 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pjjj4"]
Nov 26 14:53:44 crc kubenswrapper[5037]: I1126 14:53:44.194023 5037 generic.go:334] "Generic (PLEG): container finished" podID="82fa2a1e-7176-413f-9eb7-ad31f402d326" containerID="d156b1b5bd0224a735b613d2f65c4bd632a11a72fc5427972430c3cea5f90b45" exitCode=0
Nov 26 14:53:44 crc kubenswrapper[5037]: I1126 14:53:44.194061 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjjj4" event={"ID":"82fa2a1e-7176-413f-9eb7-ad31f402d326","Type":"ContainerDied","Data":"d156b1b5bd0224a735b613d2f65c4bd632a11a72fc5427972430c3cea5f90b45"}
Nov 26 14:53:44 crc kubenswrapper[5037]: I1126 14:53:44.195475 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjjj4" event={"ID":"82fa2a1e-7176-413f-9eb7-ad31f402d326","Type":"ContainerStarted","Data":"f51b8b5bcce1b08e7f776e34cc96d56be837c04a5c8a4a6be192000998f8be6e"}
Nov 26 14:53:44 crc kubenswrapper[5037]: I1126 14:53:44.195531 5037 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 14:53:46 crc kubenswrapper[5037]: I1126 14:53:46.211266 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjjj4" event={"ID":"82fa2a1e-7176-413f-9eb7-ad31f402d326","Type":"ContainerStarted","Data":"652ccecb6bd0cf38fae8a086b770de872038eae3574c957ad44498aeea432e73"}
Nov 26 14:53:47 crc kubenswrapper[5037]: I1126 14:53:47.220580 5037 generic.go:334] "Generic (PLEG): container finished" podID="82fa2a1e-7176-413f-9eb7-ad31f402d326" containerID="652ccecb6bd0cf38fae8a086b770de872038eae3574c957ad44498aeea432e73" exitCode=0
Nov 26 14:53:47 crc kubenswrapper[5037]: I1126 14:53:47.220633 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjjj4" event={"ID":"82fa2a1e-7176-413f-9eb7-ad31f402d326","Type":"ContainerDied","Data":"652ccecb6bd0cf38fae8a086b770de872038eae3574c957ad44498aeea432e73"}
Nov 26 14:53:48 crc kubenswrapper[5037]: I1126 14:53:48.228324 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjjj4" event={"ID":"82fa2a1e-7176-413f-9eb7-ad31f402d326","Type":"ContainerStarted","Data":"d4300c1d32222cd1a14275a2aa023cd921907410745e89c343b1ddec1d13ced5"}
Nov 26 14:53:48 crc kubenswrapper[5037]: I1126 14:53:48.247368 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pjjj4" podStartSLOduration=1.5189157340000001 podStartE2EDuration="5.247350019s" podCreationTimestamp="2025-11-26 14:53:43 +0000 UTC" firstStartedPulling="2025-11-26 14:53:44.195301529 +0000 UTC m=+2290.992071703" lastFinishedPulling="2025-11-26 14:53:47.923735784 +0000 UTC m=+2294.720505988" observedRunningTime="2025-11-26 14:53:48.243561657 +0000 UTC m=+2295.040331831" watchObservedRunningTime="2025-11-26 14:53:48.247350019 +0000 UTC m=+2295.044120203"
Nov 26 14:53:48 crc kubenswrapper[5037]: I1126 14:53:48.907826 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:53:48 crc kubenswrapper[5037]: E1126 14:53:48.908064 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:53:53 crc kubenswrapper[5037]: I1126 14:53:53.442179 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:53:53 crc kubenswrapper[5037]: I1126 14:53:53.442240 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:53:54 crc kubenswrapper[5037]: I1126 14:53:54.493874 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pjjj4" podUID="82fa2a1e-7176-413f-9eb7-ad31f402d326" containerName="registry-server" probeResult="failure" output=<
Nov 26 14:53:54 crc kubenswrapper[5037]: timeout: failed to connect service ":50051" within 1s
Nov 26 14:53:54 crc kubenswrapper[5037]: >
Nov 26 14:54:02 crc kubenswrapper[5037]: I1126 14:54:02.908473 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:54:02 crc kubenswrapper[5037]: E1126 14:54:02.911047 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:54:03 crc kubenswrapper[5037]: I1126 14:54:03.514889 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:54:03 crc kubenswrapper[5037]: I1126 14:54:03.572710 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:54:03 crc kubenswrapper[5037]: I1126 14:54:03.760827 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pjjj4"]
Nov 26 14:54:05 crc kubenswrapper[5037]: I1126 14:54:05.384377 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pjjj4" podUID="82fa2a1e-7176-413f-9eb7-ad31f402d326" containerName="registry-server" containerID="cri-o://d4300c1d32222cd1a14275a2aa023cd921907410745e89c343b1ddec1d13ced5" gracePeriod=2
Nov 26 14:54:06 crc kubenswrapper[5037]: I1126 14:54:06.396239 5037 generic.go:334] "Generic (PLEG): container finished" podID="82fa2a1e-7176-413f-9eb7-ad31f402d326" containerID="d4300c1d32222cd1a14275a2aa023cd921907410745e89c343b1ddec1d13ced5" exitCode=0
Nov 26 14:54:06 crc kubenswrapper[5037]: I1126 14:54:06.396356 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjjj4" event={"ID":"82fa2a1e-7176-413f-9eb7-ad31f402d326","Type":"ContainerDied","Data":"d4300c1d32222cd1a14275a2aa023cd921907410745e89c343b1ddec1d13ced5"}
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.098114 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.202119 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-88fl5\" (UniqueName: \"kubernetes.io/projected/82fa2a1e-7176-413f-9eb7-ad31f402d326-kube-api-access-88fl5\") pod \"82fa2a1e-7176-413f-9eb7-ad31f402d326\" (UID: \"82fa2a1e-7176-413f-9eb7-ad31f402d326\") "
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.202199 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82fa2a1e-7176-413f-9eb7-ad31f402d326-catalog-content\") pod \"82fa2a1e-7176-413f-9eb7-ad31f402d326\" (UID: \"82fa2a1e-7176-413f-9eb7-ad31f402d326\") "
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.202250 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82fa2a1e-7176-413f-9eb7-ad31f402d326-utilities\") pod \"82fa2a1e-7176-413f-9eb7-ad31f402d326\" (UID: \"82fa2a1e-7176-413f-9eb7-ad31f402d326\") "
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.203963 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82fa2a1e-7176-413f-9eb7-ad31f402d326-utilities" (OuterVolumeSpecName: "utilities") pod "82fa2a1e-7176-413f-9eb7-ad31f402d326" (UID: "82fa2a1e-7176-413f-9eb7-ad31f402d326"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.208113 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82fa2a1e-7176-413f-9eb7-ad31f402d326-kube-api-access-88fl5" (OuterVolumeSpecName: "kube-api-access-88fl5") pod "82fa2a1e-7176-413f-9eb7-ad31f402d326" (UID: "82fa2a1e-7176-413f-9eb7-ad31f402d326"). InnerVolumeSpecName "kube-api-access-88fl5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.303574 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-88fl5\" (UniqueName: \"kubernetes.io/projected/82fa2a1e-7176-413f-9eb7-ad31f402d326-kube-api-access-88fl5\") on node \"crc\" DevicePath \"\""
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.303603 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82fa2a1e-7176-413f-9eb7-ad31f402d326-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.339233 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82fa2a1e-7176-413f-9eb7-ad31f402d326-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "82fa2a1e-7176-413f-9eb7-ad31f402d326" (UID: "82fa2a1e-7176-413f-9eb7-ad31f402d326"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.404679 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82fa2a1e-7176-413f-9eb7-ad31f402d326-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.411164 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjjj4" event={"ID":"82fa2a1e-7176-413f-9eb7-ad31f402d326","Type":"ContainerDied","Data":"f51b8b5bcce1b08e7f776e34cc96d56be837c04a5c8a4a6be192000998f8be6e"}
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.411227 5037 scope.go:117] "RemoveContainer" containerID="d4300c1d32222cd1a14275a2aa023cd921907410745e89c343b1ddec1d13ced5"
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.411250 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pjjj4"
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.439654 5037 scope.go:117] "RemoveContainer" containerID="652ccecb6bd0cf38fae8a086b770de872038eae3574c957ad44498aeea432e73"
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.464457 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pjjj4"]
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.469505 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pjjj4"]
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.485256 5037 scope.go:117] "RemoveContainer" containerID="d156b1b5bd0224a735b613d2f65c4bd632a11a72fc5427972430c3cea5f90b45"
Nov 26 14:54:07 crc kubenswrapper[5037]: E1126 14:54:07.559936 5037 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod82fa2a1e_7176_413f_9eb7_ad31f402d326.slice/crio-f51b8b5bcce1b08e7f776e34cc96d56be837c04a5c8a4a6be192000998f8be6e\": RecentStats: unable to find data in memory cache]"
Nov 26 14:54:07 crc kubenswrapper[5037]: I1126 14:54:07.924781 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82fa2a1e-7176-413f-9eb7-ad31f402d326" path="/var/lib/kubelet/pods/82fa2a1e-7176-413f-9eb7-ad31f402d326/volumes"
Nov 26 14:54:14 crc kubenswrapper[5037]: I1126 14:54:14.908575 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:54:14 crc kubenswrapper[5037]: E1126 14:54:14.910492 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:54:28 crc kubenswrapper[5037]: I1126 14:54:28.908841 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:54:28 crc kubenswrapper[5037]: E1126 14:54:28.909704 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:54:41 crc kubenswrapper[5037]: I1126 14:54:41.908042 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:54:41 crc kubenswrapper[5037]: E1126 14:54:41.908783 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:54:52 crc kubenswrapper[5037]: I1126 14:54:52.908720 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:54:52 crc kubenswrapper[5037]: E1126 14:54:52.909607 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:55:04 crc kubenswrapper[5037]: I1126 14:55:04.908115 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:55:04 crc kubenswrapper[5037]: E1126 14:55:04.908750 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:55:16 crc kubenswrapper[5037]: I1126 14:55:16.908589 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:55:16 crc kubenswrapper[5037]: E1126 14:55:16.910808 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:55:29 crc kubenswrapper[5037]: I1126 14:55:29.908968 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:55:29 crc kubenswrapper[5037]: E1126 14:55:29.909985 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.014607 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-t5t4q"]
Nov 26 14:55:32 crc kubenswrapper[5037]: E1126 14:55:32.016011 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82fa2a1e-7176-413f-9eb7-ad31f402d326" containerName="extract-content"
Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.016062 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="82fa2a1e-7176-413f-9eb7-ad31f402d326" containerName="extract-content"
Nov 26 14:55:32 crc kubenswrapper[5037]: E1126 14:55:32.022386 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82fa2a1e-7176-413f-9eb7-ad31f402d326" containerName="registry-server"
Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.022458 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="82fa2a1e-7176-413f-9eb7-ad31f402d326" containerName="registry-server"
Nov
26 14:55:32 crc kubenswrapper[5037]: E1126 14:55:32.022651 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82fa2a1e-7176-413f-9eb7-ad31f402d326" containerName="extract-utilities" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.022688 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="82fa2a1e-7176-413f-9eb7-ad31f402d326" containerName="extract-utilities" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.023909 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="82fa2a1e-7176-413f-9eb7-ad31f402d326" containerName="registry-server" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.033993 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.068836 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlvjd\" (UniqueName: \"kubernetes.io/projected/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-kube-api-access-jlvjd\") pod \"redhat-marketplace-t5t4q\" (UID: \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\") " pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.069012 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-utilities\") pod \"redhat-marketplace-t5t4q\" (UID: \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\") " pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.069154 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-catalog-content\") pod \"redhat-marketplace-t5t4q\" (UID: \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\") " pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.077045 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-t5t4q"] Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.172111 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlvjd\" (UniqueName: \"kubernetes.io/projected/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-kube-api-access-jlvjd\") pod \"redhat-marketplace-t5t4q\" (UID: \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\") " pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.172186 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-utilities\") pod \"redhat-marketplace-t5t4q\" (UID: \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\") " pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.172258 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-catalog-content\") pod \"redhat-marketplace-t5t4q\" (UID: \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\") " pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.172838 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-utilities\") pod \"redhat-marketplace-t5t4q\" (UID: \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\") " pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.172925 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-catalog-content\") pod \"redhat-marketplace-t5t4q\" (UID: \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\") " pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.202218 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlvjd\" (UniqueName: \"kubernetes.io/projected/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-kube-api-access-jlvjd\") pod \"redhat-marketplace-t5t4q\" (UID: \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\") " pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.372677 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:32 crc kubenswrapper[5037]: I1126 14:55:32.867985 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-t5t4q"] Nov 26 14:55:33 crc kubenswrapper[5037]: I1126 14:55:33.110639 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5t4q" event={"ID":"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12","Type":"ContainerStarted","Data":"33bb7fa93b6d8a1accf99f72e824ebf397d14f81370828ceeb086e04cbe9fdee"} Nov 26 14:55:33 crc kubenswrapper[5037]: I1126 14:55:33.110704 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5t4q" event={"ID":"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12","Type":"ContainerStarted","Data":"2be4818a3a797eb09d27fbe698c098bfeacaf2bb89c7ff3f71325d33424c6ad4"} Nov 26 14:55:34 crc kubenswrapper[5037]: I1126 14:55:34.126584 5037 generic.go:334] "Generic (PLEG): container finished" podID="9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" containerID="33bb7fa93b6d8a1accf99f72e824ebf397d14f81370828ceeb086e04cbe9fdee" exitCode=0 Nov 26 14:55:34 crc kubenswrapper[5037]: I1126 14:55:34.131169 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5t4q" event={"ID":"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12","Type":"ContainerDied","Data":"33bb7fa93b6d8a1accf99f72e824ebf397d14f81370828ceeb086e04cbe9fdee"} Nov 26 14:55:36 crc kubenswrapper[5037]: I1126 14:55:36.161080 5037 generic.go:334] "Generic (PLEG): container finished" podID="9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" containerID="c5eff0fa5381bb171ebdc6d0d56c0692c2c6f209e3e64e4d687a867c6cc5eafb" exitCode=0 Nov 26 14:55:36 crc kubenswrapper[5037]: I1126 14:55:36.161144 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5t4q" event={"ID":"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12","Type":"ContainerDied","Data":"c5eff0fa5381bb171ebdc6d0d56c0692c2c6f209e3e64e4d687a867c6cc5eafb"} Nov 26 14:55:37 crc kubenswrapper[5037]: I1126 14:55:37.184938 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5t4q" event={"ID":"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12","Type":"ContainerStarted","Data":"e8e002c925e42c016f4515fed66c8cf37999aa6f64ed978337758f41150ccc9e"} Nov 26 14:55:37 crc kubenswrapper[5037]: I1126 14:55:37.212080 5037 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-t5t4q" podStartSLOduration=3.514632522 podStartE2EDuration="6.212059473s" podCreationTimestamp="2025-11-26 14:55:31 +0000 UTC" firstStartedPulling="2025-11-26 14:55:34.130241015 +0000 UTC m=+2400.927011229" lastFinishedPulling="2025-11-26 14:55:36.827667956 +0000 UTC m=+2403.624438180" observedRunningTime="2025-11-26 14:55:37.208531588 +0000 UTC m=+2404.005301782" watchObservedRunningTime="2025-11-26 14:55:37.212059473 +0000 UTC m=+2404.008829667" Nov 26 14:55:40 crc kubenswrapper[5037]: I1126 14:55:40.909014 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7" Nov 26 14:55:40 crc kubenswrapper[5037]: E1126 14:55:40.909890 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:55:42 crc kubenswrapper[5037]: I1126 14:55:42.373582 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:42 crc kubenswrapper[5037]: I1126 14:55:42.373702 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:42 crc kubenswrapper[5037]: I1126 14:55:42.457575 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:43 crc kubenswrapper[5037]: I1126 14:55:43.287777 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:43 crc kubenswrapper[5037]: I1126 14:55:43.344361 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-t5t4q"] Nov 26 14:55:45 crc kubenswrapper[5037]: I1126 14:55:45.254502 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-t5t4q" podUID="9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" containerName="registry-server" containerID="cri-o://e8e002c925e42c016f4515fed66c8cf37999aa6f64ed978337758f41150ccc9e" gracePeriod=2 Nov 26 14:55:45 crc kubenswrapper[5037]: I1126 14:55:45.759771 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:45 crc kubenswrapper[5037]: I1126 14:55:45.928928 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-catalog-content\") pod \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\" (UID: \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\") " Nov 26 14:55:45 crc kubenswrapper[5037]: I1126 14:55:45.929384 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-utilities\") pod \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\" (UID: \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\") " Nov 26 14:55:45 crc kubenswrapper[5037]: I1126 14:55:45.929487 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlvjd\" (UniqueName: \"kubernetes.io/projected/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-kube-api-access-jlvjd\") pod \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\" (UID: \"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12\") " Nov 26 14:55:45 crc kubenswrapper[5037]: I1126 14:55:45.931027 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-utilities" (OuterVolumeSpecName: "utilities") pod "9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" (UID: "9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:55:45 crc kubenswrapper[5037]: I1126 14:55:45.936307 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-kube-api-access-jlvjd" (OuterVolumeSpecName: "kube-api-access-jlvjd") pod "9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" (UID: "9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12"). InnerVolumeSpecName "kube-api-access-jlvjd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:55:45 crc kubenswrapper[5037]: I1126 14:55:45.962147 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" (UID: "9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.031510 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.031865 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlvjd\" (UniqueName: \"kubernetes.io/projected/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-kube-api-access-jlvjd\") on node \"crc\" DevicePath \"\"" Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.031884 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.268247 5037 generic.go:334] "Generic (PLEG): container finished" podID="9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" containerID="e8e002c925e42c016f4515fed66c8cf37999aa6f64ed978337758f41150ccc9e" exitCode=0 Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.268404 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t5t4q" Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.268437 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5t4q" event={"ID":"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12","Type":"ContainerDied","Data":"e8e002c925e42c016f4515fed66c8cf37999aa6f64ed978337758f41150ccc9e"} Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.269597 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5t4q" event={"ID":"9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12","Type":"ContainerDied","Data":"2be4818a3a797eb09d27fbe698c098bfeacaf2bb89c7ff3f71325d33424c6ad4"} Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.269628 5037 scope.go:117] "RemoveContainer" containerID="e8e002c925e42c016f4515fed66c8cf37999aa6f64ed978337758f41150ccc9e" Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.305639 5037 scope.go:117] "RemoveContainer" containerID="c5eff0fa5381bb171ebdc6d0d56c0692c2c6f209e3e64e4d687a867c6cc5eafb" Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.322013 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-t5t4q"] Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.335683 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-t5t4q"] Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.347904 5037 scope.go:117] "RemoveContainer" containerID="33bb7fa93b6d8a1accf99f72e824ebf397d14f81370828ceeb086e04cbe9fdee" Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.372945 5037 scope.go:117] "RemoveContainer" containerID="e8e002c925e42c016f4515fed66c8cf37999aa6f64ed978337758f41150ccc9e" Nov 26 14:55:46 crc kubenswrapper[5037]: E1126 14:55:46.373723 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8e002c925e42c016f4515fed66c8cf37999aa6f64ed978337758f41150ccc9e\": container with ID starting with e8e002c925e42c016f4515fed66c8cf37999aa6f64ed978337758f41150ccc9e not found: ID does not exist" containerID="e8e002c925e42c016f4515fed66c8cf37999aa6f64ed978337758f41150ccc9e" Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.373816 5037 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8e002c925e42c016f4515fed66c8cf37999aa6f64ed978337758f41150ccc9e"} err="failed to get container status \"e8e002c925e42c016f4515fed66c8cf37999aa6f64ed978337758f41150ccc9e\": rpc error: code = NotFound desc = could not find container \"e8e002c925e42c016f4515fed66c8cf37999aa6f64ed978337758f41150ccc9e\": container with ID starting with e8e002c925e42c016f4515fed66c8cf37999aa6f64ed978337758f41150ccc9e not found: ID does not exist" Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.373908 5037 scope.go:117] "RemoveContainer" containerID="c5eff0fa5381bb171ebdc6d0d56c0692c2c6f209e3e64e4d687a867c6cc5eafb" Nov 26 14:55:46 crc kubenswrapper[5037]: E1126 14:55:46.374661 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5eff0fa5381bb171ebdc6d0d56c0692c2c6f209e3e64e4d687a867c6cc5eafb\": container with ID starting with c5eff0fa5381bb171ebdc6d0d56c0692c2c6f209e3e64e4d687a867c6cc5eafb not found: ID does not exist" containerID="c5eff0fa5381bb171ebdc6d0d56c0692c2c6f209e3e64e4d687a867c6cc5eafb" Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.374745 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5eff0fa5381bb171ebdc6d0d56c0692c2c6f209e3e64e4d687a867c6cc5eafb"} err="failed to get container status \"c5eff0fa5381bb171ebdc6d0d56c0692c2c6f209e3e64e4d687a867c6cc5eafb\": rpc error: code = NotFound desc = could not find container \"c5eff0fa5381bb171ebdc6d0d56c0692c2c6f209e3e64e4d687a867c6cc5eafb\": container with ID starting with c5eff0fa5381bb171ebdc6d0d56c0692c2c6f209e3e64e4d687a867c6cc5eafb not found: ID does not exist" Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.374797 5037 scope.go:117] "RemoveContainer" containerID="33bb7fa93b6d8a1accf99f72e824ebf397d14f81370828ceeb086e04cbe9fdee" Nov 26 14:55:46 crc kubenswrapper[5037]: E1126 14:55:46.375394 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33bb7fa93b6d8a1accf99f72e824ebf397d14f81370828ceeb086e04cbe9fdee\": container with ID starting with 33bb7fa93b6d8a1accf99f72e824ebf397d14f81370828ceeb086e04cbe9fdee not found: ID does not exist" containerID="33bb7fa93b6d8a1accf99f72e824ebf397d14f81370828ceeb086e04cbe9fdee" Nov 26 14:55:46 crc kubenswrapper[5037]: I1126 14:55:46.375459 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33bb7fa93b6d8a1accf99f72e824ebf397d14f81370828ceeb086e04cbe9fdee"} err="failed to get container status \"33bb7fa93b6d8a1accf99f72e824ebf397d14f81370828ceeb086e04cbe9fdee\": rpc error: code = NotFound desc = could not find container \"33bb7fa93b6d8a1accf99f72e824ebf397d14f81370828ceeb086e04cbe9fdee\": container with ID starting with 33bb7fa93b6d8a1accf99f72e824ebf397d14f81370828ceeb086e04cbe9fdee not found: ID does not exist" Nov 26 14:55:47 crc kubenswrapper[5037]: I1126 14:55:47.919764 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" path="/var/lib/kubelet/pods/9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12/volumes" Nov 26 14:55:51 crc kubenswrapper[5037]: I1126 14:55:51.908516 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7" Nov 26 14:55:51 crc kubenswrapper[5037]: E1126 14:55:51.909089 5037 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:56:06 crc kubenswrapper[5037]: I1126 14:56:06.909503 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7" Nov 26 14:56:06 crc kubenswrapper[5037]: E1126 14:56:06.910783 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:56:07 crc kubenswrapper[5037]: I1126 14:56:07.923591 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mq55n"] Nov 26 14:56:07 crc kubenswrapper[5037]: E1126 14:56:07.924011 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" containerName="extract-content" Nov 26 14:56:07 crc kubenswrapper[5037]: I1126 14:56:07.924033 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" containerName="extract-content" Nov 26 14:56:07 crc kubenswrapper[5037]: E1126 14:56:07.924054 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" containerName="registry-server" Nov 26 14:56:07 crc kubenswrapper[5037]: I1126 14:56:07.924066 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" containerName="registry-server" Nov 26 14:56:07 crc kubenswrapper[5037]: E1126 14:56:07.924093 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" containerName="extract-utilities" Nov 26 14:56:07 crc kubenswrapper[5037]: I1126 14:56:07.924106 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" containerName="extract-utilities" Nov 26 14:56:07 crc kubenswrapper[5037]: I1126 14:56:07.924416 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ea5fb7e-c4bd-4a16-a6a3-e8a79f703e12" containerName="registry-server" Nov 26 14:56:07 crc kubenswrapper[5037]: I1126 14:56:07.926175 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:07 crc kubenswrapper[5037]: I1126 14:56:07.941134 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mq55n"] Nov 26 14:56:08 crc kubenswrapper[5037]: I1126 14:56:08.089047 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb00813c-c887-4a9f-ab0d-457da0cfedc2-catalog-content\") pod \"certified-operators-mq55n\" (UID: \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\") " pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:08 crc kubenswrapper[5037]: I1126 14:56:08.089607 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb00813c-c887-4a9f-ab0d-457da0cfedc2-utilities\") pod \"certified-operators-mq55n\" (UID: \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\") " pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:08 crc kubenswrapper[5037]: I1126 14:56:08.089640 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plgkw\" (UniqueName: \"kubernetes.io/projected/eb00813c-c887-4a9f-ab0d-457da0cfedc2-kube-api-access-plgkw\") pod \"certified-operators-mq55n\" (UID: \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\") " pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:08 crc kubenswrapper[5037]: I1126 14:56:08.190611 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb00813c-c887-4a9f-ab0d-457da0cfedc2-utilities\") pod \"certified-operators-mq55n\" (UID: \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\") " pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:08 crc kubenswrapper[5037]: I1126 14:56:08.190669 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plgkw\" (UniqueName: \"kubernetes.io/projected/eb00813c-c887-4a9f-ab0d-457da0cfedc2-kube-api-access-plgkw\") pod \"certified-operators-mq55n\" (UID: \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\") " pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:08 crc kubenswrapper[5037]: I1126 14:56:08.190735 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb00813c-c887-4a9f-ab0d-457da0cfedc2-catalog-content\") pod \"certified-operators-mq55n\" (UID: \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\") " pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:08 crc kubenswrapper[5037]: I1126 14:56:08.191188 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb00813c-c887-4a9f-ab0d-457da0cfedc2-utilities\") pod \"certified-operators-mq55n\" (UID: \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\") " pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:08 crc kubenswrapper[5037]: I1126 14:56:08.191248 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb00813c-c887-4a9f-ab0d-457da0cfedc2-catalog-content\") pod \"certified-operators-mq55n\" (UID: \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\") " pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:08 crc kubenswrapper[5037]: I1126 14:56:08.212529 5037 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-plgkw\" (UniqueName: \"kubernetes.io/projected/eb00813c-c887-4a9f-ab0d-457da0cfedc2-kube-api-access-plgkw\") pod \"certified-operators-mq55n\" (UID: \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\") " pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:08 crc kubenswrapper[5037]: I1126 14:56:08.258163 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:08 crc kubenswrapper[5037]: I1126 14:56:08.772678 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mq55n"] Nov 26 14:56:09 crc kubenswrapper[5037]: I1126 14:56:09.483823 5037 generic.go:334] "Generic (PLEG): container finished" podID="eb00813c-c887-4a9f-ab0d-457da0cfedc2" containerID="85da4e57004ffb2d7329e81721719ac4b164665e3d38a1f54527bb58ee01054d" exitCode=0 Nov 26 14:56:09 crc kubenswrapper[5037]: I1126 14:56:09.483879 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mq55n" event={"ID":"eb00813c-c887-4a9f-ab0d-457da0cfedc2","Type":"ContainerDied","Data":"85da4e57004ffb2d7329e81721719ac4b164665e3d38a1f54527bb58ee01054d"} Nov 26 14:56:09 crc kubenswrapper[5037]: I1126 14:56:09.484181 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mq55n" event={"ID":"eb00813c-c887-4a9f-ab0d-457da0cfedc2","Type":"ContainerStarted","Data":"ed92fd61775c594f89fdbf82c2e7580f526058a6f8b429bfae7344a1a699a01a"} Nov 26 14:56:10 crc kubenswrapper[5037]: I1126 14:56:10.492931 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mq55n" event={"ID":"eb00813c-c887-4a9f-ab0d-457da0cfedc2","Type":"ContainerStarted","Data":"5be3756d7d51b192d04eda0ef9e881bf8aac723135f723acbe1f72232f2d5773"} Nov 26 14:56:11 crc kubenswrapper[5037]: I1126 14:56:11.504642 5037 generic.go:334] "Generic (PLEG): container finished" podID="eb00813c-c887-4a9f-ab0d-457da0cfedc2" containerID="5be3756d7d51b192d04eda0ef9e881bf8aac723135f723acbe1f72232f2d5773" exitCode=0 Nov 26 14:56:11 crc kubenswrapper[5037]: I1126 14:56:11.504706 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mq55n" event={"ID":"eb00813c-c887-4a9f-ab0d-457da0cfedc2","Type":"ContainerDied","Data":"5be3756d7d51b192d04eda0ef9e881bf8aac723135f723acbe1f72232f2d5773"} Nov 26 14:56:12 crc kubenswrapper[5037]: I1126 14:56:12.517254 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mq55n" event={"ID":"eb00813c-c887-4a9f-ab0d-457da0cfedc2","Type":"ContainerStarted","Data":"c4b3693551e04130740848703b1cfe5db6b19453e2475b209e1a79db73185462"} Nov 26 14:56:12 crc kubenswrapper[5037]: I1126 14:56:12.532285 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mq55n" podStartSLOduration=3.036278865 podStartE2EDuration="5.532267337s" podCreationTimestamp="2025-11-26 14:56:07 +0000 UTC" firstStartedPulling="2025-11-26 14:56:09.486376205 +0000 UTC m=+2436.283146399" lastFinishedPulling="2025-11-26 14:56:11.982364647 +0000 UTC m=+2438.779134871" observedRunningTime="2025-11-26 14:56:12.531026907 +0000 UTC m=+2439.327797101" watchObservedRunningTime="2025-11-26 14:56:12.532267337 +0000 UTC m=+2439.329037521" Nov 26 14:56:18 crc kubenswrapper[5037]: I1126 14:56:18.259860 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:18 crc kubenswrapper[5037]: I1126 14:56:18.260339 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:18 crc kubenswrapper[5037]: I1126 14:56:18.307075 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:18 crc kubenswrapper[5037]: I1126 14:56:18.635980 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:18 crc kubenswrapper[5037]: I1126 14:56:18.695249 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mq55n"] Nov 26 14:56:20 crc kubenswrapper[5037]: I1126 14:56:20.579966 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mq55n" podUID="eb00813c-c887-4a9f-ab0d-457da0cfedc2" containerName="registry-server" containerID="cri-o://c4b3693551e04130740848703b1cfe5db6b19453e2475b209e1a79db73185462" gracePeriod=2 Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.096958 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.188694 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb00813c-c887-4a9f-ab0d-457da0cfedc2-utilities\") pod \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\" (UID: \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\") " Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.188777 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-plgkw\" (UniqueName: \"kubernetes.io/projected/eb00813c-c887-4a9f-ab0d-457da0cfedc2-kube-api-access-plgkw\") pod \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\" (UID: \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\") " Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.188816 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb00813c-c887-4a9f-ab0d-457da0cfedc2-catalog-content\") pod \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\" (UID: \"eb00813c-c887-4a9f-ab0d-457da0cfedc2\") " Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.190334 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb00813c-c887-4a9f-ab0d-457da0cfedc2-utilities" (OuterVolumeSpecName: "utilities") pod "eb00813c-c887-4a9f-ab0d-457da0cfedc2" (UID: "eb00813c-c887-4a9f-ab0d-457da0cfedc2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.198728 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb00813c-c887-4a9f-ab0d-457da0cfedc2-kube-api-access-plgkw" (OuterVolumeSpecName: "kube-api-access-plgkw") pod "eb00813c-c887-4a9f-ab0d-457da0cfedc2" (UID: "eb00813c-c887-4a9f-ab0d-457da0cfedc2"). InnerVolumeSpecName "kube-api-access-plgkw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.274266 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb00813c-c887-4a9f-ab0d-457da0cfedc2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eb00813c-c887-4a9f-ab0d-457da0cfedc2" (UID: "eb00813c-c887-4a9f-ab0d-457da0cfedc2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.290319 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb00813c-c887-4a9f-ab0d-457da0cfedc2-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.290349 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-plgkw\" (UniqueName: \"kubernetes.io/projected/eb00813c-c887-4a9f-ab0d-457da0cfedc2-kube-api-access-plgkw\") on node \"crc\" DevicePath \"\"" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.290367 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb00813c-c887-4a9f-ab0d-457da0cfedc2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.589927 5037 generic.go:334] "Generic (PLEG): container finished" podID="eb00813c-c887-4a9f-ab0d-457da0cfedc2" containerID="c4b3693551e04130740848703b1cfe5db6b19453e2475b209e1a79db73185462" exitCode=0 Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.589973 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mq55n" event={"ID":"eb00813c-c887-4a9f-ab0d-457da0cfedc2","Type":"ContainerDied","Data":"c4b3693551e04130740848703b1cfe5db6b19453e2475b209e1a79db73185462"} Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.590005 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mq55n" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.590023 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mq55n" event={"ID":"eb00813c-c887-4a9f-ab0d-457da0cfedc2","Type":"ContainerDied","Data":"ed92fd61775c594f89fdbf82c2e7580f526058a6f8b429bfae7344a1a699a01a"} Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.590045 5037 scope.go:117] "RemoveContainer" containerID="c4b3693551e04130740848703b1cfe5db6b19453e2475b209e1a79db73185462" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.615016 5037 scope.go:117] "RemoveContainer" containerID="5be3756d7d51b192d04eda0ef9e881bf8aac723135f723acbe1f72232f2d5773" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.625677 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mq55n"] Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.632810 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mq55n"] Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.646465 5037 scope.go:117] "RemoveContainer" containerID="85da4e57004ffb2d7329e81721719ac4b164665e3d38a1f54527bb58ee01054d" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.667777 5037 scope.go:117] "RemoveContainer" containerID="c4b3693551e04130740848703b1cfe5db6b19453e2475b209e1a79db73185462" Nov 26 14:56:21 crc kubenswrapper[5037]: E1126 14:56:21.668151 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4b3693551e04130740848703b1cfe5db6b19453e2475b209e1a79db73185462\": container with ID starting with c4b3693551e04130740848703b1cfe5db6b19453e2475b209e1a79db73185462 not found: ID does not exist" containerID="c4b3693551e04130740848703b1cfe5db6b19453e2475b209e1a79db73185462" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.668270 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4b3693551e04130740848703b1cfe5db6b19453e2475b209e1a79db73185462"} err="failed to get container status \"c4b3693551e04130740848703b1cfe5db6b19453e2475b209e1a79db73185462\": rpc error: code = NotFound desc = could not find container \"c4b3693551e04130740848703b1cfe5db6b19453e2475b209e1a79db73185462\": container with ID starting with c4b3693551e04130740848703b1cfe5db6b19453e2475b209e1a79db73185462 not found: ID does not exist" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.668307 5037 scope.go:117] "RemoveContainer" containerID="5be3756d7d51b192d04eda0ef9e881bf8aac723135f723acbe1f72232f2d5773" Nov 26 14:56:21 crc kubenswrapper[5037]: E1126 14:56:21.668732 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5be3756d7d51b192d04eda0ef9e881bf8aac723135f723acbe1f72232f2d5773\": container with ID starting with 5be3756d7d51b192d04eda0ef9e881bf8aac723135f723acbe1f72232f2d5773 not found: ID does not exist" containerID="5be3756d7d51b192d04eda0ef9e881bf8aac723135f723acbe1f72232f2d5773" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.668751 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5be3756d7d51b192d04eda0ef9e881bf8aac723135f723acbe1f72232f2d5773"} err="failed to get container status \"5be3756d7d51b192d04eda0ef9e881bf8aac723135f723acbe1f72232f2d5773\": rpc error: code = NotFound desc = could not find 
container \"5be3756d7d51b192d04eda0ef9e881bf8aac723135f723acbe1f72232f2d5773\": container with ID starting with 5be3756d7d51b192d04eda0ef9e881bf8aac723135f723acbe1f72232f2d5773 not found: ID does not exist" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.668781 5037 scope.go:117] "RemoveContainer" containerID="85da4e57004ffb2d7329e81721719ac4b164665e3d38a1f54527bb58ee01054d" Nov 26 14:56:21 crc kubenswrapper[5037]: E1126 14:56:21.668995 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85da4e57004ffb2d7329e81721719ac4b164665e3d38a1f54527bb58ee01054d\": container with ID starting with 85da4e57004ffb2d7329e81721719ac4b164665e3d38a1f54527bb58ee01054d not found: ID does not exist" containerID="85da4e57004ffb2d7329e81721719ac4b164665e3d38a1f54527bb58ee01054d" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.669031 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85da4e57004ffb2d7329e81721719ac4b164665e3d38a1f54527bb58ee01054d"} err="failed to get container status \"85da4e57004ffb2d7329e81721719ac4b164665e3d38a1f54527bb58ee01054d\": rpc error: code = NotFound desc = could not find container \"85da4e57004ffb2d7329e81721719ac4b164665e3d38a1f54527bb58ee01054d\": container with ID starting with 85da4e57004ffb2d7329e81721719ac4b164665e3d38a1f54527bb58ee01054d not found: ID does not exist" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.908885 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7" Nov 26 14:56:21 crc kubenswrapper[5037]: E1126 14:56:21.909623 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:56:21 crc kubenswrapper[5037]: I1126 14:56:21.919102 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb00813c-c887-4a9f-ab0d-457da0cfedc2" path="/var/lib/kubelet/pods/eb00813c-c887-4a9f-ab0d-457da0cfedc2/volumes" Nov 26 14:56:36 crc kubenswrapper[5037]: I1126 14:56:36.908634 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7" Nov 26 14:56:36 crc kubenswrapper[5037]: E1126 14:56:36.909622 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:56:47 crc kubenswrapper[5037]: I1126 14:56:47.909018 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7" Nov 26 14:56:47 crc kubenswrapper[5037]: E1126 14:56:47.911009 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.048748 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-llzzn"] Nov 26 14:56:54 crc kubenswrapper[5037]: E1126 14:56:54.049390 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb00813c-c887-4a9f-ab0d-457da0cfedc2" containerName="extract-utilities" Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.049402 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb00813c-c887-4a9f-ab0d-457da0cfedc2" containerName="extract-utilities" Nov 26 14:56:54 crc kubenswrapper[5037]: E1126 14:56:54.049417 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb00813c-c887-4a9f-ab0d-457da0cfedc2" containerName="registry-server" Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.049423 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb00813c-c887-4a9f-ab0d-457da0cfedc2" containerName="registry-server" Nov 26 14:56:54 crc kubenswrapper[5037]: E1126 14:56:54.049438 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb00813c-c887-4a9f-ab0d-457da0cfedc2" containerName="extract-content" Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.049444 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb00813c-c887-4a9f-ab0d-457da0cfedc2" containerName="extract-content" Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.049583 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb00813c-c887-4a9f-ab0d-457da0cfedc2" containerName="registry-server" Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.050931 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.062037 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-llzzn"]
Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.106384 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69c81dd8-253e-4f54-bf2d-68828864c7cd-utilities\") pod \"community-operators-llzzn\" (UID: \"69c81dd8-253e-4f54-bf2d-68828864c7cd\") " pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.106442 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69c81dd8-253e-4f54-bf2d-68828864c7cd-catalog-content\") pod \"community-operators-llzzn\" (UID: \"69c81dd8-253e-4f54-bf2d-68828864c7cd\") " pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.106560 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2kfn\" (UniqueName: \"kubernetes.io/projected/69c81dd8-253e-4f54-bf2d-68828864c7cd-kube-api-access-f2kfn\") pod \"community-operators-llzzn\" (UID: \"69c81dd8-253e-4f54-bf2d-68828864c7cd\") " pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.208111 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69c81dd8-253e-4f54-bf2d-68828864c7cd-utilities\") pod \"community-operators-llzzn\" (UID: \"69c81dd8-253e-4f54-bf2d-68828864c7cd\") " pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.208209 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69c81dd8-253e-4f54-bf2d-68828864c7cd-catalog-content\") pod \"community-operators-llzzn\" (UID: \"69c81dd8-253e-4f54-bf2d-68828864c7cd\") " pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.208402 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2kfn\" (UniqueName: \"kubernetes.io/projected/69c81dd8-253e-4f54-bf2d-68828864c7cd-kube-api-access-f2kfn\") pod \"community-operators-llzzn\" (UID: \"69c81dd8-253e-4f54-bf2d-68828864c7cd\") " pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.208972 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69c81dd8-253e-4f54-bf2d-68828864c7cd-catalog-content\") pod \"community-operators-llzzn\" (UID: \"69c81dd8-253e-4f54-bf2d-68828864c7cd\") " pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.209013 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69c81dd8-253e-4f54-bf2d-68828864c7cd-utilities\") pod \"community-operators-llzzn\" (UID: \"69c81dd8-253e-4f54-bf2d-68828864c7cd\") " pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.241944 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2kfn\" (UniqueName: \"kubernetes.io/projected/69c81dd8-253e-4f54-bf2d-68828864c7cd-kube-api-access-f2kfn\") pod \"community-operators-llzzn\" (UID: \"69c81dd8-253e-4f54-bf2d-68828864c7cd\") " pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.387813 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.704412 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-llzzn"]
Nov 26 14:56:54 crc kubenswrapper[5037]: I1126 14:56:54.914626 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llzzn" event={"ID":"69c81dd8-253e-4f54-bf2d-68828864c7cd","Type":"ContainerStarted","Data":"27127e334cfdcaaea7cbaf72a9b8e09ebf1e347caaedfcc1b012ec169e8b8113"}
Nov 26 14:56:55 crc kubenswrapper[5037]: I1126 14:56:55.939674 5037 generic.go:334] "Generic (PLEG): container finished" podID="69c81dd8-253e-4f54-bf2d-68828864c7cd" containerID="6e5172b592ffdaf6e02adf11d68981307f8b764a6394cf80c9ccb5749dba0b2e" exitCode=0
Nov 26 14:56:55 crc kubenswrapper[5037]: I1126 14:56:55.940275 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llzzn" event={"ID":"69c81dd8-253e-4f54-bf2d-68828864c7cd","Type":"ContainerDied","Data":"6e5172b592ffdaf6e02adf11d68981307f8b764a6394cf80c9ccb5749dba0b2e"}
Nov 26 14:56:58 crc kubenswrapper[5037]: I1126 14:56:58.908160 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:56:58 crc kubenswrapper[5037]: E1126 14:56:58.909120 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:57:00 crc kubenswrapper[5037]: I1126 14:57:00.984647 5037 generic.go:334] "Generic (PLEG): container finished" podID="69c81dd8-253e-4f54-bf2d-68828864c7cd" containerID="8972b27a4518cd740d54064e07265937b8aa7371478931ce0e2302e5df5ac453" exitCode=0
Nov 26 14:57:00 crc kubenswrapper[5037]: I1126 14:57:00.984753 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llzzn" event={"ID":"69c81dd8-253e-4f54-bf2d-68828864c7cd","Type":"ContainerDied","Data":"8972b27a4518cd740d54064e07265937b8aa7371478931ce0e2302e5df5ac453"}
Nov 26 14:57:01 crc kubenswrapper[5037]: I1126 14:57:01.995575 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llzzn" event={"ID":"69c81dd8-253e-4f54-bf2d-68828864c7cd","Type":"ContainerStarted","Data":"230334f503e4f60713b8c05154fd1c3ed5a236985f7f9e4b63e9dbcb1497f54d"}
Nov 26 14:57:02 crc kubenswrapper[5037]: I1126 14:57:02.027476 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-llzzn" podStartSLOduration=2.547487535 podStartE2EDuration="8.02745014s" podCreationTimestamp="2025-11-26 14:56:54 +0000 UTC" firstStartedPulling="2025-11-26 14:56:55.94407328 +0000 UTC m=+2482.740843464" lastFinishedPulling="2025-11-26 14:57:01.424035875 +0000 UTC m=+2488.220806069" observedRunningTime="2025-11-26 14:57:02.018632666 +0000 UTC m=+2488.815402890" watchObservedRunningTime="2025-11-26 14:57:02.02745014 +0000 UTC m=+2488.824220354"
Nov 26 14:57:04 crc kubenswrapper[5037]: I1126 14:57:04.389027 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:57:04 crc kubenswrapper[5037]: I1126 14:57:04.389433 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:57:04 crc kubenswrapper[5037]: I1126 14:57:04.449782 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:57:09 crc kubenswrapper[5037]: I1126 14:57:09.908654 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:57:09 crc kubenswrapper[5037]: E1126 14:57:09.909343 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 14:57:14 crc kubenswrapper[5037]: I1126 14:57:14.463528 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-llzzn"
Nov 26 14:57:14 crc kubenswrapper[5037]: I1126 14:57:14.542189 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-llzzn"]
Nov 26 14:57:14 crc kubenswrapper[5037]: I1126 14:57:14.593350 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fvms6"]
Nov 26 14:57:14 crc kubenswrapper[5037]: I1126 14:57:14.593760 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fvms6" podUID="f80c86bc-3752-494b-baa4-07549c0c183c" containerName="registry-server" containerID="cri-o://da18f09819ed97e63ae4b76a6a0a364dd921b07937d6228515929a3c19735639" gracePeriod=2
Nov 26 14:57:15 crc kubenswrapper[5037]: E1126 14:57:15.466823 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of da18f09819ed97e63ae4b76a6a0a364dd921b07937d6228515929a3c19735639 is running failed: container process not found" containerID="da18f09819ed97e63ae4b76a6a0a364dd921b07937d6228515929a3c19735639" cmd=["grpc_health_probe","-addr=:50051"]
Nov 26 14:57:15 crc kubenswrapper[5037]: E1126 14:57:15.467522 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of da18f09819ed97e63ae4b76a6a0a364dd921b07937d6228515929a3c19735639 is running failed: container process not found" containerID="da18f09819ed97e63ae4b76a6a0a364dd921b07937d6228515929a3c19735639" cmd=["grpc_health_probe","-addr=:50051"]
Nov 26 14:57:15 crc kubenswrapper[5037]: E1126 14:57:15.467893 5037 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of da18f09819ed97e63ae4b76a6a0a364dd921b07937d6228515929a3c19735639 is running failed: container process not found" containerID="da18f09819ed97e63ae4b76a6a0a364dd921b07937d6228515929a3c19735639" cmd=["grpc_health_probe","-addr=:50051"]
Nov 26 14:57:15 crc kubenswrapper[5037]: E1126 14:57:15.467966 5037 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of da18f09819ed97e63ae4b76a6a0a364dd921b07937d6228515929a3c19735639 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-fvms6" podUID="f80c86bc-3752-494b-baa4-07549c0c183c" containerName="registry-server"
Nov 26 14:57:16 crc kubenswrapper[5037]: I1126 14:57:16.134539 5037 generic.go:334] "Generic (PLEG): container finished" podID="f80c86bc-3752-494b-baa4-07549c0c183c" containerID="da18f09819ed97e63ae4b76a6a0a364dd921b07937d6228515929a3c19735639" exitCode=0
Nov 26 14:57:16 crc kubenswrapper[5037]: I1126 14:57:16.134630 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fvms6" event={"ID":"f80c86bc-3752-494b-baa4-07549c0c183c","Type":"ContainerDied","Data":"da18f09819ed97e63ae4b76a6a0a364dd921b07937d6228515929a3c19735639"}
Nov 26 14:57:16 crc kubenswrapper[5037]: I1126 14:57:16.430091 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fvms6"
Nov 26 14:57:16 crc kubenswrapper[5037]: I1126 14:57:16.561586 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f80c86bc-3752-494b-baa4-07549c0c183c-utilities\") pod \"f80c86bc-3752-494b-baa4-07549c0c183c\" (UID: \"f80c86bc-3752-494b-baa4-07549c0c183c\") "
Nov 26 14:57:16 crc kubenswrapper[5037]: I1126 14:57:16.561700 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f80c86bc-3752-494b-baa4-07549c0c183c-catalog-content\") pod \"f80c86bc-3752-494b-baa4-07549c0c183c\" (UID: \"f80c86bc-3752-494b-baa4-07549c0c183c\") "
Nov 26 14:57:16 crc kubenswrapper[5037]: I1126 14:57:16.561742 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgtcw\" (UniqueName: \"kubernetes.io/projected/f80c86bc-3752-494b-baa4-07549c0c183c-kube-api-access-bgtcw\") pod \"f80c86bc-3752-494b-baa4-07549c0c183c\" (UID: \"f80c86bc-3752-494b-baa4-07549c0c183c\") "
Nov 26 14:57:16 crc kubenswrapper[5037]: I1126 14:57:16.562477 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f80c86bc-3752-494b-baa4-07549c0c183c-utilities" (OuterVolumeSpecName: "utilities") pod "f80c86bc-3752-494b-baa4-07549c0c183c" (UID: "f80c86bc-3752-494b-baa4-07549c0c183c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:57:16 crc kubenswrapper[5037]: I1126 14:57:16.566844 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f80c86bc-3752-494b-baa4-07549c0c183c-kube-api-access-bgtcw" (OuterVolumeSpecName: "kube-api-access-bgtcw") pod "f80c86bc-3752-494b-baa4-07549c0c183c" (UID: "f80c86bc-3752-494b-baa4-07549c0c183c"). InnerVolumeSpecName "kube-api-access-bgtcw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 14:57:16 crc kubenswrapper[5037]: I1126 14:57:16.606135 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f80c86bc-3752-494b-baa4-07549c0c183c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f80c86bc-3752-494b-baa4-07549c0c183c" (UID: "f80c86bc-3752-494b-baa4-07549c0c183c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 14:57:16 crc kubenswrapper[5037]: I1126 14:57:16.664247 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f80c86bc-3752-494b-baa4-07549c0c183c-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 14:57:16 crc kubenswrapper[5037]: I1126 14:57:16.664328 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f80c86bc-3752-494b-baa4-07549c0c183c-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 14:57:16 crc kubenswrapper[5037]: I1126 14:57:16.664348 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgtcw\" (UniqueName: \"kubernetes.io/projected/f80c86bc-3752-494b-baa4-07549c0c183c-kube-api-access-bgtcw\") on node \"crc\" DevicePath \"\""
Nov 26 14:57:17 crc kubenswrapper[5037]: I1126 14:57:17.145097 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fvms6" event={"ID":"f80c86bc-3752-494b-baa4-07549c0c183c","Type":"ContainerDied","Data":"a0ab9679c76ee24defd57ce92d09b09455da201b534d71c26875db1af37c4388"}
Nov 26 14:57:17 crc kubenswrapper[5037]: I1126 14:57:17.145346 5037 scope.go:117] "RemoveContainer" containerID="da18f09819ed97e63ae4b76a6a0a364dd921b07937d6228515929a3c19735639"
Nov 26 14:57:17 crc kubenswrapper[5037]: I1126 14:57:17.145177 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fvms6"
Nov 26 14:57:17 crc kubenswrapper[5037]: I1126 14:57:17.166519 5037 scope.go:117] "RemoveContainer" containerID="3a21ef9438d9ddfb8b34f259c1d3002f316cfb26c8d7b9bbac4c363c310d9387"
Nov 26 14:57:17 crc kubenswrapper[5037]: I1126 14:57:17.173755 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fvms6"]
Nov 26 14:57:17 crc kubenswrapper[5037]: I1126 14:57:17.179084 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fvms6"]
Nov 26 14:57:17 crc kubenswrapper[5037]: I1126 14:57:17.194906 5037 scope.go:117] "RemoveContainer" containerID="cb842309812bd09d8c7783b91c41ce0c0aca29379de172505334f9078f2e28e7"
Nov 26 14:57:17 crc kubenswrapper[5037]: I1126 14:57:17.933510 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f80c86bc-3752-494b-baa4-07549c0c183c" path="/var/lib/kubelet/pods/f80c86bc-3752-494b-baa4-07549c0c183c/volumes"
Nov 26 14:57:22 crc kubenswrapper[5037]: I1126 14:57:22.908910 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 14:57:23 crc kubenswrapper[5037]: I1126 14:57:23.202638 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"37cf83c3f390120760ec1191c5d8fcbbbcbddc97fb8d3b32f04ab5574ccc4343"}
Nov 26 14:59:41 crc kubenswrapper[5037]: I1126 14:59:41.247583 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 14:59:41 crc kubenswrapper[5037]: I1126 14:59:41.248667 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.176956 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"]
Nov 26 15:00:00 crc kubenswrapper[5037]: E1126 15:00:00.178658 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f80c86bc-3752-494b-baa4-07549c0c183c" containerName="registry-server"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.178685 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f80c86bc-3752-494b-baa4-07549c0c183c" containerName="registry-server"
Nov 26 15:00:00 crc kubenswrapper[5037]: E1126 15:00:00.178739 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f80c86bc-3752-494b-baa4-07549c0c183c" containerName="extract-utilities"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.178753 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f80c86bc-3752-494b-baa4-07549c0c183c" containerName="extract-utilities"
Nov 26 15:00:00 crc kubenswrapper[5037]: E1126 15:00:00.178775 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f80c86bc-3752-494b-baa4-07549c0c183c" containerName="extract-content"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.178788 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f80c86bc-3752-494b-baa4-07549c0c183c" containerName="extract-content"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.179091 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="f80c86bc-3752-494b-baa4-07549c0c183c" containerName="registry-server"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.180204 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.189834 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.190060 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.193645 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"]
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.312638 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f5f8235-ed4c-48fc-95c7-7fd707821313-config-volume\") pod \"collect-profiles-29402820-dznf7\" (UID: \"9f5f8235-ed4c-48fc-95c7-7fd707821313\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.312953 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjr6z\" (UniqueName: \"kubernetes.io/projected/9f5f8235-ed4c-48fc-95c7-7fd707821313-kube-api-access-xjr6z\") pod \"collect-profiles-29402820-dznf7\" (UID: \"9f5f8235-ed4c-48fc-95c7-7fd707821313\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.313077 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f5f8235-ed4c-48fc-95c7-7fd707821313-secret-volume\") pod \"collect-profiles-29402820-dznf7\" (UID: \"9f5f8235-ed4c-48fc-95c7-7fd707821313\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.415002 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f5f8235-ed4c-48fc-95c7-7fd707821313-config-volume\") pod \"collect-profiles-29402820-dznf7\" (UID: \"9f5f8235-ed4c-48fc-95c7-7fd707821313\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.415086 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjr6z\" (UniqueName: \"kubernetes.io/projected/9f5f8235-ed4c-48fc-95c7-7fd707821313-kube-api-access-xjr6z\") pod \"collect-profiles-29402820-dznf7\" (UID: \"9f5f8235-ed4c-48fc-95c7-7fd707821313\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.415165 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f5f8235-ed4c-48fc-95c7-7fd707821313-secret-volume\") pod \"collect-profiles-29402820-dznf7\" (UID: \"9f5f8235-ed4c-48fc-95c7-7fd707821313\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.416081 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f5f8235-ed4c-48fc-95c7-7fd707821313-config-volume\") pod \"collect-profiles-29402820-dznf7\" (UID: \"9f5f8235-ed4c-48fc-95c7-7fd707821313\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.437522 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f5f8235-ed4c-48fc-95c7-7fd707821313-secret-volume\") pod \"collect-profiles-29402820-dznf7\" (UID: \"9f5f8235-ed4c-48fc-95c7-7fd707821313\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.449535 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjr6z\" (UniqueName: \"kubernetes.io/projected/9f5f8235-ed4c-48fc-95c7-7fd707821313-kube-api-access-xjr6z\") pod \"collect-profiles-29402820-dznf7\" (UID: \"9f5f8235-ed4c-48fc-95c7-7fd707821313\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.516348 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"
Nov 26 15:00:00 crc kubenswrapper[5037]: I1126 15:00:00.796631 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"]
Nov 26 15:00:01 crc kubenswrapper[5037]: I1126 15:00:01.613018 5037 generic.go:334] "Generic (PLEG): container finished" podID="9f5f8235-ed4c-48fc-95c7-7fd707821313" containerID="21e19b20b44fbb895c7ef77ff801ec7bcdadc1bbef176e4451dbeb46f06d70e9" exitCode=0
Nov 26 15:00:01 crc kubenswrapper[5037]: I1126 15:00:01.613365 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7" event={"ID":"9f5f8235-ed4c-48fc-95c7-7fd707821313","Type":"ContainerDied","Data":"21e19b20b44fbb895c7ef77ff801ec7bcdadc1bbef176e4451dbeb46f06d70e9"}
Nov 26 15:00:01 crc kubenswrapper[5037]: I1126 15:00:01.613398 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7" event={"ID":"9f5f8235-ed4c-48fc-95c7-7fd707821313","Type":"ContainerStarted","Data":"d61c16feabd8e21a03d58d114679442ad22f88da3c345bd1800c9e8fe46cbb1b"}
Nov 26 15:00:02 crc kubenswrapper[5037]: I1126 15:00:02.986046 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"
Nov 26 15:00:03 crc kubenswrapper[5037]: I1126 15:00:03.057891 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f5f8235-ed4c-48fc-95c7-7fd707821313-config-volume\") pod \"9f5f8235-ed4c-48fc-95c7-7fd707821313\" (UID: \"9f5f8235-ed4c-48fc-95c7-7fd707821313\") "
Nov 26 15:00:03 crc kubenswrapper[5037]: I1126 15:00:03.057980 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f5f8235-ed4c-48fc-95c7-7fd707821313-secret-volume\") pod \"9f5f8235-ed4c-48fc-95c7-7fd707821313\" (UID: \"9f5f8235-ed4c-48fc-95c7-7fd707821313\") "
Nov 26 15:00:03 crc kubenswrapper[5037]: I1126 15:00:03.058032 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xjr6z\" (UniqueName: \"kubernetes.io/projected/9f5f8235-ed4c-48fc-95c7-7fd707821313-kube-api-access-xjr6z\") pod \"9f5f8235-ed4c-48fc-95c7-7fd707821313\" (UID: \"9f5f8235-ed4c-48fc-95c7-7fd707821313\") "
Nov 26 15:00:03 crc kubenswrapper[5037]: I1126 15:00:03.059407 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f5f8235-ed4c-48fc-95c7-7fd707821313-config-volume" (OuterVolumeSpecName: "config-volume") pod "9f5f8235-ed4c-48fc-95c7-7fd707821313" (UID: "9f5f8235-ed4c-48fc-95c7-7fd707821313"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 15:00:03 crc kubenswrapper[5037]: I1126 15:00:03.065936 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f5f8235-ed4c-48fc-95c7-7fd707821313-kube-api-access-xjr6z" (OuterVolumeSpecName: "kube-api-access-xjr6z") pod "9f5f8235-ed4c-48fc-95c7-7fd707821313" (UID: "9f5f8235-ed4c-48fc-95c7-7fd707821313"). InnerVolumeSpecName "kube-api-access-xjr6z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 15:00:03 crc kubenswrapper[5037]: I1126 15:00:03.068496 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f5f8235-ed4c-48fc-95c7-7fd707821313-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9f5f8235-ed4c-48fc-95c7-7fd707821313" (UID: "9f5f8235-ed4c-48fc-95c7-7fd707821313"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 15:00:03 crc kubenswrapper[5037]: I1126 15:00:03.159348 5037 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f5f8235-ed4c-48fc-95c7-7fd707821313-config-volume\") on node \"crc\" DevicePath \"\""
Nov 26 15:00:03 crc kubenswrapper[5037]: I1126 15:00:03.159395 5037 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f5f8235-ed4c-48fc-95c7-7fd707821313-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 26 15:00:03 crc kubenswrapper[5037]: I1126 15:00:03.159410 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xjr6z\" (UniqueName: \"kubernetes.io/projected/9f5f8235-ed4c-48fc-95c7-7fd707821313-kube-api-access-xjr6z\") on node \"crc\" DevicePath \"\""
Nov 26 15:00:03 crc kubenswrapper[5037]: I1126 15:00:03.646451 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7" event={"ID":"9f5f8235-ed4c-48fc-95c7-7fd707821313","Type":"ContainerDied","Data":"d61c16feabd8e21a03d58d114679442ad22f88da3c345bd1800c9e8fe46cbb1b"}
Nov 26 15:00:03 crc kubenswrapper[5037]: I1126 15:00:03.646497 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d61c16feabd8e21a03d58d114679442ad22f88da3c345bd1800c9e8fe46cbb1b"
Nov 26 15:00:03 crc kubenswrapper[5037]: I1126 15:00:03.646559 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402820-dznf7"
Nov 26 15:00:04 crc kubenswrapper[5037]: I1126 15:00:04.072451 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f"]
Nov 26 15:00:04 crc kubenswrapper[5037]: I1126 15:00:04.080834 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402775-ts75f"]
Nov 26 15:00:05 crc kubenswrapper[5037]: I1126 15:00:05.920671 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da42804b-0fa3-43ee-9566-296c28b8052f" path="/var/lib/kubelet/pods/da42804b-0fa3-43ee-9566-296c28b8052f/volumes"
Nov 26 15:00:11 crc kubenswrapper[5037]: I1126 15:00:11.247937 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:00:11 crc kubenswrapper[5037]: I1126 15:00:11.248670 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:00:41 crc kubenswrapper[5037]: I1126 15:00:41.247660 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:00:41 crc kubenswrapper[5037]: I1126 15:00:41.248340 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:00:41 crc kubenswrapper[5037]: I1126 15:00:41.248472 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d"
Nov 26 15:00:41 crc kubenswrapper[5037]: I1126 15:00:41.249413 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"37cf83c3f390120760ec1191c5d8fcbbbcbddc97fb8d3b32f04ab5574ccc4343"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 15:00:41 crc kubenswrapper[5037]: I1126 15:00:41.249510 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://37cf83c3f390120760ec1191c5d8fcbbbcbddc97fb8d3b32f04ab5574ccc4343" gracePeriod=600
Nov 26 15:00:42 crc kubenswrapper[5037]: I1126 15:00:42.020347 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"37cf83c3f390120760ec1191c5d8fcbbbcbddc97fb8d3b32f04ab5574ccc4343"}
Nov 26 15:00:42 crc kubenswrapper[5037]: I1126 15:00:42.020844 5037 scope.go:117] "RemoveContainer" containerID="5da9472de6a1894f3ccce7ec649d181fb0e98eab16a89d18071c1060c850c9e7"
Nov 26 15:00:42 crc kubenswrapper[5037]: I1126 15:00:42.020275 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="37cf83c3f390120760ec1191c5d8fcbbbcbddc97fb8d3b32f04ab5574ccc4343" exitCode=0
Nov 26 15:00:42 crc kubenswrapper[5037]: I1126 15:00:42.020977 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"}
Nov 26 15:00:50 crc kubenswrapper[5037]: I1126 15:00:50.500647 5037 scope.go:117] "RemoveContainer" containerID="e0b94887a7ceb773846edbc19d5674bf4b5cd5e32774aaa15c6c7aa979d9bd40"
Nov 26 15:02:41 crc kubenswrapper[5037]: I1126 15:02:41.247371 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:02:41 crc kubenswrapper[5037]: I1126 15:02:41.247863 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:03:11 crc kubenswrapper[5037]: I1126 15:03:11.247423 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:03:11 crc kubenswrapper[5037]: I1126 15:03:11.248124 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:03:41 crc kubenswrapper[5037]: I1126 15:03:41.247498 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:03:41 crc kubenswrapper[5037]: I1126 15:03:41.248375 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:03:41 crc kubenswrapper[5037]: I1126 15:03:41.248464 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d"
Nov 26 15:03:41 crc kubenswrapper[5037]: I1126 15:03:41.249724 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 15:03:41 crc kubenswrapper[5037]: I1126 15:03:41.249866 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1" gracePeriod=600
Nov 26 15:03:41 crc kubenswrapper[5037]: E1126 15:03:41.381987 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:03:41 crc kubenswrapper[5037]: I1126 15:03:41.665932 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1" exitCode=0
Nov 26 15:03:41 crc kubenswrapper[5037]: I1126 15:03:41.665971 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"}
Nov 26 15:03:41 crc kubenswrapper[5037]: I1126 15:03:41.666010 5037 scope.go:117] "RemoveContainer" containerID="37cf83c3f390120760ec1191c5d8fcbbbcbddc97fb8d3b32f04ab5574ccc4343"
Nov 26 15:03:41 crc kubenswrapper[5037]: I1126 15:03:41.666529 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:03:41 crc kubenswrapper[5037]: E1126 15:03:41.666773 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:03:52 crc kubenswrapper[5037]: I1126 15:03:52.909377 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:03:52 crc kubenswrapper[5037]: E1126 15:03:52.911605 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:04:04 crc kubenswrapper[5037]: I1126 15:04:04.909367 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:04:04 crc kubenswrapper[5037]: E1126 15:04:04.910585 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:04:18 crc kubenswrapper[5037]: I1126 15:04:18.908971 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:04:18 crc kubenswrapper[5037]: E1126 15:04:18.910338 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:04:29 crc kubenswrapper[5037]: I1126 15:04:29.908771 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:04:29 crc kubenswrapper[5037]: E1126 15:04:29.909817 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:04:42 crc kubenswrapper[5037]: I1126 15:04:42.909605 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:04:42 crc kubenswrapper[5037]: E1126 15:04:42.910821 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:04:53 crc kubenswrapper[5037]: I1126 15:04:53.916392 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:04:53 crc kubenswrapper[5037]: E1126 15:04:53.917334 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.303774 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-j5pg4"]
Nov 26 15:04:58 crc kubenswrapper[5037]: E1126 15:04:58.304545 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f5f8235-ed4c-48fc-95c7-7fd707821313" containerName="collect-profiles"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.304557 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f5f8235-ed4c-48fc-95c7-7fd707821313" containerName="collect-profiles"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.304710 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f5f8235-ed4c-48fc-95c7-7fd707821313" containerName="collect-profiles"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.305695 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.323445 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e435b80-8de9-4629-8749-f80a3128ba61-catalog-content\") pod \"redhat-operators-j5pg4\" (UID: \"8e435b80-8de9-4629-8749-f80a3128ba61\") " pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.323610 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l46r5\" (UniqueName: \"kubernetes.io/projected/8e435b80-8de9-4629-8749-f80a3128ba61-kube-api-access-l46r5\") pod \"redhat-operators-j5pg4\" (UID: \"8e435b80-8de9-4629-8749-f80a3128ba61\") " pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.323683 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e435b80-8de9-4629-8749-f80a3128ba61-utilities\") pod \"redhat-operators-j5pg4\" (UID: \"8e435b80-8de9-4629-8749-f80a3128ba61\") " pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.326590 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j5pg4"]
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.424557 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l46r5\" (UniqueName: \"kubernetes.io/projected/8e435b80-8de9-4629-8749-f80a3128ba61-kube-api-access-l46r5\") pod \"redhat-operators-j5pg4\" (UID: \"8e435b80-8de9-4629-8749-f80a3128ba61\") " pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.424623 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e435b80-8de9-4629-8749-f80a3128ba61-utilities\") pod \"redhat-operators-j5pg4\" (UID: \"8e435b80-8de9-4629-8749-f80a3128ba61\") " pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.424664 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e435b80-8de9-4629-8749-f80a3128ba61-catalog-content\") pod \"redhat-operators-j5pg4\" (UID: \"8e435b80-8de9-4629-8749-f80a3128ba61\") " pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.425096 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e435b80-8de9-4629-8749-f80a3128ba61-catalog-content\") pod \"redhat-operators-j5pg4\" (UID: \"8e435b80-8de9-4629-8749-f80a3128ba61\") " pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.425194 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e435b80-8de9-4629-8749-f80a3128ba61-utilities\") pod \"redhat-operators-j5pg4\" (UID: \"8e435b80-8de9-4629-8749-f80a3128ba61\") " pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.454109 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l46r5\" (UniqueName: \"kubernetes.io/projected/8e435b80-8de9-4629-8749-f80a3128ba61-kube-api-access-l46r5\") pod \"redhat-operators-j5pg4\" (UID: \"8e435b80-8de9-4629-8749-f80a3128ba61\") " pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.629074 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:04:58 crc kubenswrapper[5037]: I1126 15:04:58.860610 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j5pg4"]
Nov 26 15:04:59 crc kubenswrapper[5037]: I1126 15:04:59.401943 5037 generic.go:334] "Generic (PLEG): container finished" podID="8e435b80-8de9-4629-8749-f80a3128ba61" containerID="3b672281fee9a620de13bd08d8450d9504568826a7fd0d5e3b186b6b1ae6993e" exitCode=0
Nov 26 15:04:59 crc kubenswrapper[5037]: I1126 15:04:59.402266 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5pg4" event={"ID":"8e435b80-8de9-4629-8749-f80a3128ba61","Type":"ContainerDied","Data":"3b672281fee9a620de13bd08d8450d9504568826a7fd0d5e3b186b6b1ae6993e"}
Nov 26 15:04:59 crc kubenswrapper[5037]: I1126 15:04:59.402330 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5pg4" event={"ID":"8e435b80-8de9-4629-8749-f80a3128ba61","Type":"ContainerStarted","Data":"ef181be1428b90818bf027e8f2fd11280278821b2ec99746ab89e3828c99f2db"}
Nov 26 15:04:59 crc kubenswrapper[5037]: I1126 15:04:59.403803 5037 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 15:05:00 crc kubenswrapper[5037]: I1126 15:05:00.412601 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5pg4" event={"ID":"8e435b80-8de9-4629-8749-f80a3128ba61","Type":"ContainerStarted","Data":"1cf35337772b3549ee670620db6b1c513d061e7764b3811d172eaebfde9da9b2"}
Nov 26 15:05:01 crc kubenswrapper[5037]: I1126 15:05:01.426071 5037 generic.go:334] "Generic (PLEG): container finished" podID="8e435b80-8de9-4629-8749-f80a3128ba61" containerID="1cf35337772b3549ee670620db6b1c513d061e7764b3811d172eaebfde9da9b2" exitCode=0
Nov 26 15:05:01 crc kubenswrapper[5037]: I1126 15:05:01.426159 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5pg4" event={"ID":"8e435b80-8de9-4629-8749-f80a3128ba61","Type":"ContainerDied","Data":"1cf35337772b3549ee670620db6b1c513d061e7764b3811d172eaebfde9da9b2"}
Nov 26 15:05:02 crc kubenswrapper[5037]: I1126 15:05:02.442919 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5pg4" event={"ID":"8e435b80-8de9-4629-8749-f80a3128ba61","Type":"ContainerStarted","Data":"03cd956fdec5c8f98a210e602eca559f54f9c299c199a4bf0896b40f81217795"}
Nov 26 15:05:02 crc kubenswrapper[5037]: I1126 15:05:02.467901 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-j5pg4" podStartSLOduration=1.788916612 podStartE2EDuration="4.467875955s" podCreationTimestamp="2025-11-26 15:04:58 +0000 UTC" firstStartedPulling="2025-11-26 15:04:59.403563059 +0000 UTC m=+2966.200333253" lastFinishedPulling="2025-11-26 15:05:02.082522362 +0000 UTC m=+2968.879292596" observedRunningTime="2025-11-26 15:05:02.463337484 +0000 UTC m=+2969.260107708" watchObservedRunningTime="2025-11-26 15:05:02.467875955 +0000 UTC m=+2969.264646169"
Nov 26 15:05:06 crc kubenswrapper[5037]: I1126 15:05:06.908553 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:05:06 crc kubenswrapper[5037]: E1126 15:05:06.909515 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:05:08 crc kubenswrapper[5037]: I1126 15:05:08.630360 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:05:08 crc kubenswrapper[5037]: I1126 15:05:08.630426 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:05:08 crc kubenswrapper[5037]: I1126 15:05:08.703155 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:05:09 crc kubenswrapper[5037]: I1126 15:05:09.575621 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:05:09 crc kubenswrapper[5037]: I1126 15:05:09.651552 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j5pg4"]
Nov 26 15:05:11 crc kubenswrapper[5037]: I1126 15:05:11.520646 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-j5pg4" podUID="8e435b80-8de9-4629-8749-f80a3128ba61" containerName="registry-server" containerID="cri-o://03cd956fdec5c8f98a210e602eca559f54f9c299c199a4bf0896b40f81217795" gracePeriod=2
Nov 26 15:05:11 crc kubenswrapper[5037]: E1126 15:05:11.739225 5037 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e435b80_8de9_4629_8749_f80a3128ba61.slice/crio-03cd956fdec5c8f98a210e602eca559f54f9c299c199a4bf0896b40f81217795.scope\": RecentStats: unable to find data in memory cache]"
Nov 26 15:05:12 crc kubenswrapper[5037]: I1126 15:05:12.532123 5037 generic.go:334] "Generic (PLEG): container finished" podID="8e435b80-8de9-4629-8749-f80a3128ba61" containerID="03cd956fdec5c8f98a210e602eca559f54f9c299c199a4bf0896b40f81217795" exitCode=0
Nov 26 15:05:12 crc kubenswrapper[5037]: I1126 15:05:12.532180 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5pg4" event={"ID":"8e435b80-8de9-4629-8749-f80a3128ba61","Type":"ContainerDied","Data":"03cd956fdec5c8f98a210e602eca559f54f9c299c199a4bf0896b40f81217795"}
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.083737 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.175164 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e435b80-8de9-4629-8749-f80a3128ba61-catalog-content\") pod \"8e435b80-8de9-4629-8749-f80a3128ba61\" (UID: \"8e435b80-8de9-4629-8749-f80a3128ba61\") "
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.175227 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l46r5\" (UniqueName: \"kubernetes.io/projected/8e435b80-8de9-4629-8749-f80a3128ba61-kube-api-access-l46r5\") pod \"8e435b80-8de9-4629-8749-f80a3128ba61\" (UID: \"8e435b80-8de9-4629-8749-f80a3128ba61\") "
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.175309 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e435b80-8de9-4629-8749-f80a3128ba61-utilities\") pod \"8e435b80-8de9-4629-8749-f80a3128ba61\" (UID: \"8e435b80-8de9-4629-8749-f80a3128ba61\") "
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.176516 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e435b80-8de9-4629-8749-f80a3128ba61-utilities" (OuterVolumeSpecName: "utilities") pod "8e435b80-8de9-4629-8749-f80a3128ba61" (UID: "8e435b80-8de9-4629-8749-f80a3128ba61"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.185568 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e435b80-8de9-4629-8749-f80a3128ba61-kube-api-access-l46r5" (OuterVolumeSpecName: "kube-api-access-l46r5") pod "8e435b80-8de9-4629-8749-f80a3128ba61" (UID: "8e435b80-8de9-4629-8749-f80a3128ba61"). InnerVolumeSpecName "kube-api-access-l46r5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.277157 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l46r5\" (UniqueName: \"kubernetes.io/projected/8e435b80-8de9-4629-8749-f80a3128ba61-kube-api-access-l46r5\") on node \"crc\" DevicePath \"\""
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.277547 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e435b80-8de9-4629-8749-f80a3128ba61-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.301917 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e435b80-8de9-4629-8749-f80a3128ba61-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8e435b80-8de9-4629-8749-f80a3128ba61" (UID: "8e435b80-8de9-4629-8749-f80a3128ba61"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.379497 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e435b80-8de9-4629-8749-f80a3128ba61-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.545256 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j5pg4" event={"ID":"8e435b80-8de9-4629-8749-f80a3128ba61","Type":"ContainerDied","Data":"ef181be1428b90818bf027e8f2fd11280278821b2ec99746ab89e3828c99f2db"}
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.545334 5037 scope.go:117] "RemoveContainer" containerID="03cd956fdec5c8f98a210e602eca559f54f9c299c199a4bf0896b40f81217795"
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.545335 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j5pg4"
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.566174 5037 scope.go:117] "RemoveContainer" containerID="1cf35337772b3549ee670620db6b1c513d061e7764b3811d172eaebfde9da9b2"
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.601589 5037 scope.go:117] "RemoveContainer" containerID="3b672281fee9a620de13bd08d8450d9504568826a7fd0d5e3b186b6b1ae6993e"
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.610197 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j5pg4"]
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.616489 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-j5pg4"]
Nov 26 15:05:13 crc kubenswrapper[5037]: I1126 15:05:13.924128 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e435b80-8de9-4629-8749-f80a3128ba61" path="/var/lib/kubelet/pods/8e435b80-8de9-4629-8749-f80a3128ba61/volumes"
Nov 26 15:05:19 crc kubenswrapper[5037]: I1126 15:05:19.909192 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:05:19 crc kubenswrapper[5037]: E1126 15:05:19.911634 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:05:34 crc kubenswrapper[5037]: I1126 15:05:34.908448 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:05:34 crc kubenswrapper[5037]: E1126 15:05:34.909174 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:05:47 crc kubenswrapper[5037]: I1126 15:05:47.840682 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4hvmj"]
Nov 26 15:05:47 crc kubenswrapper[5037]: E1126 15:05:47.841613 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e435b80-8de9-4629-8749-f80a3128ba61" containerName="extract-utilities"
Nov 26 15:05:47 crc kubenswrapper[5037]: I1126 15:05:47.841635 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e435b80-8de9-4629-8749-f80a3128ba61" containerName="extract-utilities"
Nov 26 15:05:47 crc kubenswrapper[5037]: E1126 15:05:47.841653 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e435b80-8de9-4629-8749-f80a3128ba61" containerName="extract-content"
Nov 26 15:05:47 crc kubenswrapper[5037]: I1126 15:05:47.841663 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e435b80-8de9-4629-8749-f80a3128ba61" containerName="extract-content"
Nov 26 15:05:47 crc kubenswrapper[5037]: E1126 15:05:47.841682 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e435b80-8de9-4629-8749-f80a3128ba61" containerName="registry-server"
Nov 26 15:05:47 crc kubenswrapper[5037]: I1126 15:05:47.841692 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e435b80-8de9-4629-8749-f80a3128ba61" containerName="registry-server"
Nov 26 15:05:47 crc kubenswrapper[5037]: I1126 15:05:47.841929 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e435b80-8de9-4629-8749-f80a3128ba61" containerName="registry-server"
Nov 26 15:05:47 crc kubenswrapper[5037]: I1126 15:05:47.843557 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4hvmj"
Nov 26 15:05:47 crc kubenswrapper[5037]: I1126 15:05:47.867558 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4hvmj"]
Nov 26 15:05:48 crc kubenswrapper[5037]: I1126 15:05:48.016553 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d36ca832-b26c-425f-a730-507c9c9defe8-catalog-content\") pod \"redhat-marketplace-4hvmj\" (UID: \"d36ca832-b26c-425f-a730-507c9c9defe8\") " pod="openshift-marketplace/redhat-marketplace-4hvmj"
Nov 26 15:05:48 crc kubenswrapper[5037]: I1126 15:05:48.016683 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckzm4\" (UniqueName: \"kubernetes.io/projected/d36ca832-b26c-425f-a730-507c9c9defe8-kube-api-access-ckzm4\") pod \"redhat-marketplace-4hvmj\" (UID: \"d36ca832-b26c-425f-a730-507c9c9defe8\") " pod="openshift-marketplace/redhat-marketplace-4hvmj"
Nov 26 15:05:48 crc kubenswrapper[5037]: I1126 15:05:48.016722 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d36ca832-b26c-425f-a730-507c9c9defe8-utilities\") pod \"redhat-marketplace-4hvmj\" (UID: \"d36ca832-b26c-425f-a730-507c9c9defe8\") " pod="openshift-marketplace/redhat-marketplace-4hvmj"
Nov 26 15:05:48 crc kubenswrapper[5037]: I1126 15:05:48.118488 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckzm4\" (UniqueName: \"kubernetes.io/projected/d36ca832-b26c-425f-a730-507c9c9defe8-kube-api-access-ckzm4\") pod \"redhat-marketplace-4hvmj\" (UID: \"d36ca832-b26c-425f-a730-507c9c9defe8\") " pod="openshift-marketplace/redhat-marketplace-4hvmj"
Nov 26 15:05:48 crc kubenswrapper[5037]: I1126 15:05:48.118553 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d36ca832-b26c-425f-a730-507c9c9defe8-utilities\") pod \"redhat-marketplace-4hvmj\" (UID: \"d36ca832-b26c-425f-a730-507c9c9defe8\") " pod="openshift-marketplace/redhat-marketplace-4hvmj"
Nov 26 15:05:48 crc kubenswrapper[5037]: I1126 15:05:48.119076 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d36ca832-b26c-425f-a730-507c9c9defe8-utilities\") pod \"redhat-marketplace-4hvmj\" (UID: \"d36ca832-b26c-425f-a730-507c9c9defe8\") " pod="openshift-marketplace/redhat-marketplace-4hvmj"
Nov 26 15:05:48 crc kubenswrapper[5037]: I1126 15:05:48.119144 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d36ca832-b26c-425f-a730-507c9c9defe8-catalog-content\") pod \"redhat-marketplace-4hvmj\" (UID: \"d36ca832-b26c-425f-a730-507c9c9defe8\") " pod="openshift-marketplace/redhat-marketplace-4hvmj"
Nov 26 15:05:48 crc kubenswrapper[5037]: I1126 15:05:48.119408 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d36ca832-b26c-425f-a730-507c9c9defe8-catalog-content\") pod \"redhat-marketplace-4hvmj\" (UID: \"d36ca832-b26c-425f-a730-507c9c9defe8\") " pod="openshift-marketplace/redhat-marketplace-4hvmj"
Nov 26 15:05:48 crc kubenswrapper[5037]: I1126 15:05:48.137015 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckzm4\" (UniqueName: \"kubernetes.io/projected/d36ca832-b26c-425f-a730-507c9c9defe8-kube-api-access-ckzm4\") pod \"redhat-marketplace-4hvmj\" (UID: \"d36ca832-b26c-425f-a730-507c9c9defe8\") " pod="openshift-marketplace/redhat-marketplace-4hvmj"
Nov 26 15:05:48 crc kubenswrapper[5037]: I1126 15:05:48.166591 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4hvmj"
Nov 26 15:05:48 crc kubenswrapper[5037]: I1126 15:05:48.627349 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4hvmj"]
Nov 26 15:05:48 crc kubenswrapper[5037]: I1126 15:05:48.836194 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4hvmj" event={"ID":"d36ca832-b26c-425f-a730-507c9c9defe8","Type":"ContainerStarted","Data":"aecb72b4d5ff47de85d948817803494c367534de417279e7ffeea5b9b38704de"}
Nov 26 15:05:49 crc kubenswrapper[5037]: I1126 15:05:49.846936 5037 generic.go:334] "Generic (PLEG): container finished" podID="d36ca832-b26c-425f-a730-507c9c9defe8" containerID="3704a6156d465777cdbcb646a2488fc94ac2140d3755f6dd24f700d1736fed65" exitCode=0
Nov 26 15:05:49 crc kubenswrapper[5037]: I1126 15:05:49.847022 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4hvmj" event={"ID":"d36ca832-b26c-425f-a730-507c9c9defe8","Type":"ContainerDied","Data":"3704a6156d465777cdbcb646a2488fc94ac2140d3755f6dd24f700d1736fed65"}
Nov 26 15:05:49 crc kubenswrapper[5037]: I1126 15:05:49.908337 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:05:49 crc kubenswrapper[5037]: E1126 15:05:49.908614 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:05:50 crc kubenswrapper[5037]: I1126 15:05:50.858564 5037 generic.go:334] "Generic (PLEG): container finished" podID="d36ca832-b26c-425f-a730-507c9c9defe8" containerID="03dd32b0c550aff98eb3ca0f873d0f0b520e88c77638d7fe26b3be140c72d38c" exitCode=0
Nov 26 15:05:50 crc kubenswrapper[5037]: I1126 15:05:50.858618 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4hvmj" event={"ID":"d36ca832-b26c-425f-a730-507c9c9defe8","Type":"ContainerDied","Data":"03dd32b0c550aff98eb3ca0f873d0f0b520e88c77638d7fe26b3be140c72d38c"}
Nov 26 15:05:51 crc kubenswrapper[5037]: I1126 15:05:51.867676 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4hvmj" event={"ID":"d36ca832-b26c-425f-a730-507c9c9defe8","Type":"ContainerStarted","Data":"622f76830adfea5d9b280337fd8fe9ac685ec56f0216141e8bde838bc18b4741"}
Nov 26 15:05:51 crc kubenswrapper[5037]: I1126 15:05:51.901005 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4hvmj" podStartSLOduration=3.371904797 podStartE2EDuration="4.900988821s" podCreationTimestamp="2025-11-26 15:05:47 +0000 UTC" firstStartedPulling="2025-11-26 15:05:49.850896597 +0000 UTC m=+3016.647666811" lastFinishedPulling="2025-11-26 15:05:51.379980621 +0000 UTC m=+3018.176750835" observedRunningTime="2025-11-26 15:05:51.89842693 +0000 UTC m=+3018.695197124" watchObservedRunningTime="2025-11-26 15:05:51.900988821 +0000 UTC m=+3018.697759005"
Nov 26 15:05:58 crc kubenswrapper[5037]: I1126 15:05:58.167148 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4hvmj"
Nov 26
15:05:58 crc kubenswrapper[5037]: I1126 15:05:58.167632 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4hvmj" Nov 26 15:05:58 crc kubenswrapper[5037]: I1126 15:05:58.232194 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4hvmj" Nov 26 15:05:58 crc kubenswrapper[5037]: I1126 15:05:58.979411 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4hvmj" Nov 26 15:05:59 crc kubenswrapper[5037]: I1126 15:05:59.034219 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4hvmj"] Nov 26 15:06:00 crc kubenswrapper[5037]: I1126 15:06:00.909497 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1" Nov 26 15:06:00 crc kubenswrapper[5037]: E1126 15:06:00.910000 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:06:00 crc kubenswrapper[5037]: I1126 15:06:00.952454 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4hvmj" podUID="d36ca832-b26c-425f-a730-507c9c9defe8" containerName="registry-server" containerID="cri-o://622f76830adfea5d9b280337fd8fe9ac685ec56f0216141e8bde838bc18b4741" gracePeriod=2 Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.434952 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4hvmj" Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.466468 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d36ca832-b26c-425f-a730-507c9c9defe8-catalog-content\") pod \"d36ca832-b26c-425f-a730-507c9c9defe8\" (UID: \"d36ca832-b26c-425f-a730-507c9c9defe8\") " Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.466576 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckzm4\" (UniqueName: \"kubernetes.io/projected/d36ca832-b26c-425f-a730-507c9c9defe8-kube-api-access-ckzm4\") pod \"d36ca832-b26c-425f-a730-507c9c9defe8\" (UID: \"d36ca832-b26c-425f-a730-507c9c9defe8\") " Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.466614 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d36ca832-b26c-425f-a730-507c9c9defe8-utilities\") pod \"d36ca832-b26c-425f-a730-507c9c9defe8\" (UID: \"d36ca832-b26c-425f-a730-507c9c9defe8\") " Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.468267 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d36ca832-b26c-425f-a730-507c9c9defe8-utilities" (OuterVolumeSpecName: "utilities") pod "d36ca832-b26c-425f-a730-507c9c9defe8" (UID: "d36ca832-b26c-425f-a730-507c9c9defe8"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.476428 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d36ca832-b26c-425f-a730-507c9c9defe8-kube-api-access-ckzm4" (OuterVolumeSpecName: "kube-api-access-ckzm4") pod "d36ca832-b26c-425f-a730-507c9c9defe8" (UID: "d36ca832-b26c-425f-a730-507c9c9defe8"). InnerVolumeSpecName "kube-api-access-ckzm4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.485500 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d36ca832-b26c-425f-a730-507c9c9defe8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d36ca832-b26c-425f-a730-507c9c9defe8" (UID: "d36ca832-b26c-425f-a730-507c9c9defe8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.571237 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d36ca832-b26c-425f-a730-507c9c9defe8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.571272 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckzm4\" (UniqueName: \"kubernetes.io/projected/d36ca832-b26c-425f-a730-507c9c9defe8-kube-api-access-ckzm4\") on node \"crc\" DevicePath \"\"" Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.571302 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d36ca832-b26c-425f-a730-507c9c9defe8-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.968490 5037 generic.go:334] "Generic (PLEG): container finished" podID="d36ca832-b26c-425f-a730-507c9c9defe8" containerID="622f76830adfea5d9b280337fd8fe9ac685ec56f0216141e8bde838bc18b4741" exitCode=0 Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.968573 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4hvmj" event={"ID":"d36ca832-b26c-425f-a730-507c9c9defe8","Type":"ContainerDied","Data":"622f76830adfea5d9b280337fd8fe9ac685ec56f0216141e8bde838bc18b4741"} Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.968642 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4hvmj" event={"ID":"d36ca832-b26c-425f-a730-507c9c9defe8","Type":"ContainerDied","Data":"aecb72b4d5ff47de85d948817803494c367534de417279e7ffeea5b9b38704de"} Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.968679 5037 scope.go:117] "RemoveContainer" containerID="622f76830adfea5d9b280337fd8fe9ac685ec56f0216141e8bde838bc18b4741" Nov 26 15:06:01 crc kubenswrapper[5037]: I1126 15:06:01.968585 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4hvmj" Nov 26 15:06:02 crc kubenswrapper[5037]: I1126 15:06:02.009507 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4hvmj"] Nov 26 15:06:02 crc kubenswrapper[5037]: I1126 15:06:02.012021 5037 scope.go:117] "RemoveContainer" containerID="03dd32b0c550aff98eb3ca0f873d0f0b520e88c77638d7fe26b3be140c72d38c" Nov 26 15:06:02 crc kubenswrapper[5037]: I1126 15:06:02.020635 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4hvmj"] Nov 26 15:06:02 crc kubenswrapper[5037]: I1126 15:06:02.034125 5037 scope.go:117] "RemoveContainer" containerID="3704a6156d465777cdbcb646a2488fc94ac2140d3755f6dd24f700d1736fed65" Nov 26 15:06:02 crc kubenswrapper[5037]: I1126 15:06:02.064826 5037 scope.go:117] "RemoveContainer" containerID="622f76830adfea5d9b280337fd8fe9ac685ec56f0216141e8bde838bc18b4741" Nov 26 15:06:02 crc kubenswrapper[5037]: E1126 15:06:02.065536 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"622f76830adfea5d9b280337fd8fe9ac685ec56f0216141e8bde838bc18b4741\": container with ID starting with 622f76830adfea5d9b280337fd8fe9ac685ec56f0216141e8bde838bc18b4741 not found: ID does not exist" containerID="622f76830adfea5d9b280337fd8fe9ac685ec56f0216141e8bde838bc18b4741" Nov 26 15:06:02 crc kubenswrapper[5037]: I1126 15:06:02.065864 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"622f76830adfea5d9b280337fd8fe9ac685ec56f0216141e8bde838bc18b4741"} err="failed to get container status \"622f76830adfea5d9b280337fd8fe9ac685ec56f0216141e8bde838bc18b4741\": rpc error: code = NotFound desc = could not find container \"622f76830adfea5d9b280337fd8fe9ac685ec56f0216141e8bde838bc18b4741\": container with ID starting with 622f76830adfea5d9b280337fd8fe9ac685ec56f0216141e8bde838bc18b4741 not found: ID does not exist" Nov 26 15:06:02 crc kubenswrapper[5037]: I1126 15:06:02.065974 5037 scope.go:117] "RemoveContainer" containerID="03dd32b0c550aff98eb3ca0f873d0f0b520e88c77638d7fe26b3be140c72d38c" Nov 26 15:06:02 crc kubenswrapper[5037]: E1126 15:06:02.066590 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03dd32b0c550aff98eb3ca0f873d0f0b520e88c77638d7fe26b3be140c72d38c\": container with ID starting with 03dd32b0c550aff98eb3ca0f873d0f0b520e88c77638d7fe26b3be140c72d38c not found: ID does not exist" containerID="03dd32b0c550aff98eb3ca0f873d0f0b520e88c77638d7fe26b3be140c72d38c" Nov 26 15:06:02 crc kubenswrapper[5037]: I1126 15:06:02.066652 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03dd32b0c550aff98eb3ca0f873d0f0b520e88c77638d7fe26b3be140c72d38c"} err="failed to get container status \"03dd32b0c550aff98eb3ca0f873d0f0b520e88c77638d7fe26b3be140c72d38c\": rpc error: code = NotFound desc = could not find container \"03dd32b0c550aff98eb3ca0f873d0f0b520e88c77638d7fe26b3be140c72d38c\": container with ID starting with 03dd32b0c550aff98eb3ca0f873d0f0b520e88c77638d7fe26b3be140c72d38c not found: ID does not exist" Nov 26 15:06:02 crc kubenswrapper[5037]: I1126 15:06:02.066690 5037 scope.go:117] "RemoveContainer" containerID="3704a6156d465777cdbcb646a2488fc94ac2140d3755f6dd24f700d1736fed65" Nov 26 15:06:02 crc kubenswrapper[5037]: E1126 15:06:02.067198 5037 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"3704a6156d465777cdbcb646a2488fc94ac2140d3755f6dd24f700d1736fed65\": container with ID starting with 3704a6156d465777cdbcb646a2488fc94ac2140d3755f6dd24f700d1736fed65 not found: ID does not exist" containerID="3704a6156d465777cdbcb646a2488fc94ac2140d3755f6dd24f700d1736fed65" Nov 26 15:06:02 crc kubenswrapper[5037]: I1126 15:06:02.067244 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3704a6156d465777cdbcb646a2488fc94ac2140d3755f6dd24f700d1736fed65"} err="failed to get container status \"3704a6156d465777cdbcb646a2488fc94ac2140d3755f6dd24f700d1736fed65\": rpc error: code = NotFound desc = could not find container \"3704a6156d465777cdbcb646a2488fc94ac2140d3755f6dd24f700d1736fed65\": container with ID starting with 3704a6156d465777cdbcb646a2488fc94ac2140d3755f6dd24f700d1736fed65 not found: ID does not exist" Nov 26 15:06:03 crc kubenswrapper[5037]: I1126 15:06:03.932140 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d36ca832-b26c-425f-a730-507c9c9defe8" path="/var/lib/kubelet/pods/d36ca832-b26c-425f-a730-507c9c9defe8/volumes" Nov 26 15:06:14 crc kubenswrapper[5037]: I1126 15:06:14.908727 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1" Nov 26 15:06:14 crc kubenswrapper[5037]: E1126 15:06:14.910830 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:06:25 crc kubenswrapper[5037]: I1126 15:06:25.909486 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1" Nov 26 15:06:25 crc kubenswrapper[5037]: E1126 15:06:25.910690 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:06:39 crc kubenswrapper[5037]: I1126 15:06:39.908355 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1" Nov 26 15:06:39 crc kubenswrapper[5037]: E1126 15:06:39.909567 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:06:50 crc kubenswrapper[5037]: I1126 15:06:50.908717 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1" Nov 26 15:06:50 crc kubenswrapper[5037]: E1126 15:06:50.910140 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:07:02 crc kubenswrapper[5037]: I1126 15:07:02.908266 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1" Nov 26 15:07:02 crc kubenswrapper[5037]: E1126 15:07:02.909646 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:07:16 crc kubenswrapper[5037]: I1126 15:07:16.909277 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1" Nov 26 15:07:16 crc kubenswrapper[5037]: E1126 15:07:16.910577 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:07:29 crc kubenswrapper[5037]: I1126 15:07:29.908474 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1" Nov 26 15:07:29 crc kubenswrapper[5037]: E1126 15:07:29.909086 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:07:41 crc kubenswrapper[5037]: I1126 15:07:41.908357 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1" Nov 26 15:07:41 crc kubenswrapper[5037]: E1126 15:07:41.909361 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.465178 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-j8hw5"] Nov 26 15:07:46 crc kubenswrapper[5037]: E1126 15:07:46.467777 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d36ca832-b26c-425f-a730-507c9c9defe8" containerName="extract-content" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.467797 5037 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="d36ca832-b26c-425f-a730-507c9c9defe8" containerName="extract-content" Nov 26 15:07:46 crc kubenswrapper[5037]: E1126 15:07:46.467840 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d36ca832-b26c-425f-a730-507c9c9defe8" containerName="extract-utilities" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.467848 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="d36ca832-b26c-425f-a730-507c9c9defe8" containerName="extract-utilities" Nov 26 15:07:46 crc kubenswrapper[5037]: E1126 15:07:46.467861 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d36ca832-b26c-425f-a730-507c9c9defe8" containerName="registry-server" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.467869 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="d36ca832-b26c-425f-a730-507c9c9defe8" containerName="registry-server" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.468041 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="d36ca832-b26c-425f-a730-507c9c9defe8" containerName="registry-server" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.469786 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.486553 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j8hw5"] Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.597982 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpwnt\" (UniqueName: \"kubernetes.io/projected/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-kube-api-access-xpwnt\") pod \"community-operators-j8hw5\" (UID: \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\") " pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.598066 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-utilities\") pod \"community-operators-j8hw5\" (UID: \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\") " pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.598113 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-catalog-content\") pod \"community-operators-j8hw5\" (UID: \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\") " pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.650546 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jjfjj"] Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.651961 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.666062 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jjfjj"] Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.700128 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-utilities\") pod \"community-operators-j8hw5\" (UID: \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\") " pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.700217 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-catalog-content\") pod \"community-operators-j8hw5\" (UID: \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\") " pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.700303 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpwnt\" (UniqueName: \"kubernetes.io/projected/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-kube-api-access-xpwnt\") pod \"community-operators-j8hw5\" (UID: \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\") " pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.700989 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-utilities\") pod \"community-operators-j8hw5\" (UID: \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\") " pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.701050 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-catalog-content\") pod \"community-operators-j8hw5\" (UID: \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\") " pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.726855 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpwnt\" (UniqueName: \"kubernetes.io/projected/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-kube-api-access-xpwnt\") pod \"community-operators-j8hw5\" (UID: \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\") " pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.801786 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.802095 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48f8419-3376-43b1-8177-51cdd23c8c18-utilities\") pod \"certified-operators-jjfjj\" (UID: \"d48f8419-3376-43b1-8177-51cdd23c8c18\") " pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.802149 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48f8419-3376-43b1-8177-51cdd23c8c18-catalog-content\") pod \"certified-operators-jjfjj\" (UID: \"d48f8419-3376-43b1-8177-51cdd23c8c18\") " pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.802179 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cm2qf\" (UniqueName: \"kubernetes.io/projected/d48f8419-3376-43b1-8177-51cdd23c8c18-kube-api-access-cm2qf\") pod \"certified-operators-jjfjj\" (UID: \"d48f8419-3376-43b1-8177-51cdd23c8c18\") " pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.904176 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48f8419-3376-43b1-8177-51cdd23c8c18-catalog-content\") pod \"certified-operators-jjfjj\" (UID: \"d48f8419-3376-43b1-8177-51cdd23c8c18\") " pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.904481 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cm2qf\" (UniqueName: \"kubernetes.io/projected/d48f8419-3376-43b1-8177-51cdd23c8c18-kube-api-access-cm2qf\") pod \"certified-operators-jjfjj\" (UID: \"d48f8419-3376-43b1-8177-51cdd23c8c18\") " pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.904568 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48f8419-3376-43b1-8177-51cdd23c8c18-utilities\") pod \"certified-operators-jjfjj\" (UID: \"d48f8419-3376-43b1-8177-51cdd23c8c18\") " pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.905067 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48f8419-3376-43b1-8177-51cdd23c8c18-utilities\") pod \"certified-operators-jjfjj\" (UID: \"d48f8419-3376-43b1-8177-51cdd23c8c18\") " pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.905131 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48f8419-3376-43b1-8177-51cdd23c8c18-catalog-content\") pod \"certified-operators-jjfjj\" (UID: \"d48f8419-3376-43b1-8177-51cdd23c8c18\") " pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.951094 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cm2qf\" (UniqueName: \"kubernetes.io/projected/d48f8419-3376-43b1-8177-51cdd23c8c18-kube-api-access-cm2qf\") pod 
\"certified-operators-jjfjj\" (UID: \"d48f8419-3376-43b1-8177-51cdd23c8c18\") " pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:46 crc kubenswrapper[5037]: I1126 15:07:46.973600 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:47 crc kubenswrapper[5037]: I1126 15:07:47.316705 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j8hw5"] Nov 26 15:07:47 crc kubenswrapper[5037]: I1126 15:07:47.324405 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jjfjj"] Nov 26 15:07:48 crc kubenswrapper[5037]: I1126 15:07:48.029918 5037 generic.go:334] "Generic (PLEG): container finished" podID="d48f8419-3376-43b1-8177-51cdd23c8c18" containerID="dcec3171947def875435e36182d9dd0e49e4dad531ebe97d5c6b35e73f296aa3" exitCode=0 Nov 26 15:07:48 crc kubenswrapper[5037]: I1126 15:07:48.030028 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjfjj" event={"ID":"d48f8419-3376-43b1-8177-51cdd23c8c18","Type":"ContainerDied","Data":"dcec3171947def875435e36182d9dd0e49e4dad531ebe97d5c6b35e73f296aa3"} Nov 26 15:07:48 crc kubenswrapper[5037]: I1126 15:07:48.030068 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjfjj" event={"ID":"d48f8419-3376-43b1-8177-51cdd23c8c18","Type":"ContainerStarted","Data":"67d64910672b0a37144b637b8a7db1f8fa224a81b134f8f9b1360dff22f716c1"} Nov 26 15:07:48 crc kubenswrapper[5037]: I1126 15:07:48.033637 5037 generic.go:334] "Generic (PLEG): container finished" podID="5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" containerID="9f3fbf8129ac8b0a62712c832686ce50854ec0b6a944b7b71e6ae0bca682ba20" exitCode=0 Nov 26 15:07:48 crc kubenswrapper[5037]: I1126 15:07:48.033689 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j8hw5" event={"ID":"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2","Type":"ContainerDied","Data":"9f3fbf8129ac8b0a62712c832686ce50854ec0b6a944b7b71e6ae0bca682ba20"} Nov 26 15:07:48 crc kubenswrapper[5037]: I1126 15:07:48.033728 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j8hw5" event={"ID":"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2","Type":"ContainerStarted","Data":"31e7876181031b334315220f379caba94f464cd935e2688cb23dbc2831efe227"} Nov 26 15:07:50 crc kubenswrapper[5037]: I1126 15:07:50.049363 5037 generic.go:334] "Generic (PLEG): container finished" podID="d48f8419-3376-43b1-8177-51cdd23c8c18" containerID="8891d2ef600faedf44de0cfc2fab9d086c016e19d3a1c5889056b3c6e35eb963" exitCode=0 Nov 26 15:07:50 crc kubenswrapper[5037]: I1126 15:07:50.049539 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjfjj" event={"ID":"d48f8419-3376-43b1-8177-51cdd23c8c18","Type":"ContainerDied","Data":"8891d2ef600faedf44de0cfc2fab9d086c016e19d3a1c5889056b3c6e35eb963"} Nov 26 15:07:50 crc kubenswrapper[5037]: I1126 15:07:50.051952 5037 generic.go:334] "Generic (PLEG): container finished" podID="5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" containerID="831a344ee2cce4a0b951610a510e1658ce954756f3b2e5cd6a2828eb6436290a" exitCode=0 Nov 26 15:07:50 crc kubenswrapper[5037]: I1126 15:07:50.051992 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j8hw5" 
event={"ID":"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2","Type":"ContainerDied","Data":"831a344ee2cce4a0b951610a510e1658ce954756f3b2e5cd6a2828eb6436290a"} Nov 26 15:07:51 crc kubenswrapper[5037]: I1126 15:07:51.062087 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j8hw5" event={"ID":"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2","Type":"ContainerStarted","Data":"74caa807eff2ae1e9ab60de12fb50be82a6bf5224aacac6e7ba4ac36523d8fc9"} Nov 26 15:07:51 crc kubenswrapper[5037]: I1126 15:07:51.065973 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjfjj" event={"ID":"d48f8419-3376-43b1-8177-51cdd23c8c18","Type":"ContainerStarted","Data":"b0b21ccc630cf7d5951459d69bf4390f985f1f11e1a118b9092bf6f05b94fd8e"} Nov 26 15:07:51 crc kubenswrapper[5037]: I1126 15:07:51.120970 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-j8hw5" podStartSLOduration=2.6220937109999998 podStartE2EDuration="5.120948017s" podCreationTimestamp="2025-11-26 15:07:46 +0000 UTC" firstStartedPulling="2025-11-26 15:07:48.035645297 +0000 UTC m=+3134.832415521" lastFinishedPulling="2025-11-26 15:07:50.534499633 +0000 UTC m=+3137.331269827" observedRunningTime="2025-11-26 15:07:51.088903556 +0000 UTC m=+3137.885673750" watchObservedRunningTime="2025-11-26 15:07:51.120948017 +0000 UTC m=+3137.917718231" Nov 26 15:07:51 crc kubenswrapper[5037]: I1126 15:07:51.121246 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jjfjj" podStartSLOduration=2.6629027560000003 podStartE2EDuration="5.121238844s" podCreationTimestamp="2025-11-26 15:07:46 +0000 UTC" firstStartedPulling="2025-11-26 15:07:48.031535047 +0000 UTC m=+3134.828305241" lastFinishedPulling="2025-11-26 15:07:50.489871125 +0000 UTC m=+3137.286641329" observedRunningTime="2025-11-26 15:07:51.110713438 +0000 UTC m=+3137.907483662" watchObservedRunningTime="2025-11-26 15:07:51.121238844 +0000 UTC m=+3137.918009068" Nov 26 15:07:53 crc kubenswrapper[5037]: I1126 15:07:53.917766 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1" Nov 26 15:07:53 crc kubenswrapper[5037]: E1126 15:07:53.918691 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:07:56 crc kubenswrapper[5037]: I1126 15:07:56.802683 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:56 crc kubenswrapper[5037]: I1126 15:07:56.803140 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:56 crc kubenswrapper[5037]: I1126 15:07:56.866893 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:56 crc kubenswrapper[5037]: I1126 15:07:56.974348 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:56 crc 
kubenswrapper[5037]: I1126 15:07:56.974390 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:57 crc kubenswrapper[5037]: I1126 15:07:57.030366 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:57 crc kubenswrapper[5037]: I1126 15:07:57.164177 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:57 crc kubenswrapper[5037]: I1126 15:07:57.195069 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:58 crc kubenswrapper[5037]: I1126 15:07:58.919259 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jjfjj"] Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.131239 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jjfjj" podUID="d48f8419-3376-43b1-8177-51cdd23c8c18" containerName="registry-server" containerID="cri-o://b0b21ccc630cf7d5951459d69bf4390f985f1f11e1a118b9092bf6f05b94fd8e" gracePeriod=2 Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.515559 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j8hw5"] Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.516198 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-j8hw5" podUID="5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" containerName="registry-server" containerID="cri-o://74caa807eff2ae1e9ab60de12fb50be82a6bf5224aacac6e7ba4ac36523d8fc9" gracePeriod=2 Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.662764 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.730854 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48f8419-3376-43b1-8177-51cdd23c8c18-utilities\") pod \"d48f8419-3376-43b1-8177-51cdd23c8c18\" (UID: \"d48f8419-3376-43b1-8177-51cdd23c8c18\") " Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.730910 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48f8419-3376-43b1-8177-51cdd23c8c18-catalog-content\") pod \"d48f8419-3376-43b1-8177-51cdd23c8c18\" (UID: \"d48f8419-3376-43b1-8177-51cdd23c8c18\") " Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.730980 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cm2qf\" (UniqueName: \"kubernetes.io/projected/d48f8419-3376-43b1-8177-51cdd23c8c18-kube-api-access-cm2qf\") pod \"d48f8419-3376-43b1-8177-51cdd23c8c18\" (UID: \"d48f8419-3376-43b1-8177-51cdd23c8c18\") " Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.731925 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d48f8419-3376-43b1-8177-51cdd23c8c18-utilities" (OuterVolumeSpecName: "utilities") pod "d48f8419-3376-43b1-8177-51cdd23c8c18" (UID: "d48f8419-3376-43b1-8177-51cdd23c8c18"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.741908 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d48f8419-3376-43b1-8177-51cdd23c8c18-kube-api-access-cm2qf" (OuterVolumeSpecName: "kube-api-access-cm2qf") pod "d48f8419-3376-43b1-8177-51cdd23c8c18" (UID: "d48f8419-3376-43b1-8177-51cdd23c8c18"). InnerVolumeSpecName "kube-api-access-cm2qf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.832990 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48f8419-3376-43b1-8177-51cdd23c8c18-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.833039 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cm2qf\" (UniqueName: \"kubernetes.io/projected/d48f8419-3376-43b1-8177-51cdd23c8c18-kube-api-access-cm2qf\") on node \"crc\" DevicePath \"\"" Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.895873 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.934249 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-utilities\") pod \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\" (UID: \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\") " Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.935034 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-utilities" (OuterVolumeSpecName: "utilities") pod "5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" (UID: "5ed64604-d7e4-4b38-af94-bbdc8fe13ec2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.935998 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-catalog-content\") pod \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\" (UID: \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\") " Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.936197 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpwnt\" (UniqueName: \"kubernetes.io/projected/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-kube-api-access-xpwnt\") pod \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\" (UID: \"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2\") " Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.936897 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:07:59 crc kubenswrapper[5037]: I1126 15:07:59.938660 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-kube-api-access-xpwnt" (OuterVolumeSpecName: "kube-api-access-xpwnt") pod "5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" (UID: "5ed64604-d7e4-4b38-af94-bbdc8fe13ec2"). InnerVolumeSpecName "kube-api-access-xpwnt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.038354 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpwnt\" (UniqueName: \"kubernetes.io/projected/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-kube-api-access-xpwnt\") on node \"crc\" DevicePath \"\"" Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.142249 5037 generic.go:334] "Generic (PLEG): container finished" podID="5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" containerID="74caa807eff2ae1e9ab60de12fb50be82a6bf5224aacac6e7ba4ac36523d8fc9" exitCode=0 Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.142341 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j8hw5" event={"ID":"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2","Type":"ContainerDied","Data":"74caa807eff2ae1e9ab60de12fb50be82a6bf5224aacac6e7ba4ac36523d8fc9"} Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.142414 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j8hw5" event={"ID":"5ed64604-d7e4-4b38-af94-bbdc8fe13ec2","Type":"ContainerDied","Data":"31e7876181031b334315220f379caba94f464cd935e2688cb23dbc2831efe227"} Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.142438 5037 scope.go:117] "RemoveContainer" containerID="74caa807eff2ae1e9ab60de12fb50be82a6bf5224aacac6e7ba4ac36523d8fc9" Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.142423 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j8hw5" Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.147466 5037 generic.go:334] "Generic (PLEG): container finished" podID="d48f8419-3376-43b1-8177-51cdd23c8c18" containerID="b0b21ccc630cf7d5951459d69bf4390f985f1f11e1a118b9092bf6f05b94fd8e" exitCode=0 Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.147510 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjfjj" event={"ID":"d48f8419-3376-43b1-8177-51cdd23c8c18","Type":"ContainerDied","Data":"b0b21ccc630cf7d5951459d69bf4390f985f1f11e1a118b9092bf6f05b94fd8e"} Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.147534 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjfjj" event={"ID":"d48f8419-3376-43b1-8177-51cdd23c8c18","Type":"ContainerDied","Data":"67d64910672b0a37144b637b8a7db1f8fa224a81b134f8f9b1360dff22f716c1"} Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.147617 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jjfjj" Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.174907 5037 scope.go:117] "RemoveContainer" containerID="831a344ee2cce4a0b951610a510e1658ce954756f3b2e5cd6a2828eb6436290a" Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.211555 5037 scope.go:117] "RemoveContainer" containerID="9f3fbf8129ac8b0a62712c832686ce50854ec0b6a944b7b71e6ae0bca682ba20" Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.239637 5037 scope.go:117] "RemoveContainer" containerID="74caa807eff2ae1e9ab60de12fb50be82a6bf5224aacac6e7ba4ac36523d8fc9" Nov 26 15:08:00 crc kubenswrapper[5037]: E1126 15:08:00.240197 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74caa807eff2ae1e9ab60de12fb50be82a6bf5224aacac6e7ba4ac36523d8fc9\": container with ID starting with 74caa807eff2ae1e9ab60de12fb50be82a6bf5224aacac6e7ba4ac36523d8fc9 not found: ID does not exist" containerID="74caa807eff2ae1e9ab60de12fb50be82a6bf5224aacac6e7ba4ac36523d8fc9" Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.240266 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74caa807eff2ae1e9ab60de12fb50be82a6bf5224aacac6e7ba4ac36523d8fc9"} err="failed to get container status \"74caa807eff2ae1e9ab60de12fb50be82a6bf5224aacac6e7ba4ac36523d8fc9\": rpc error: code = NotFound desc = could not find container \"74caa807eff2ae1e9ab60de12fb50be82a6bf5224aacac6e7ba4ac36523d8fc9\": container with ID starting with 74caa807eff2ae1e9ab60de12fb50be82a6bf5224aacac6e7ba4ac36523d8fc9 not found: ID does not exist" Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.240412 5037 scope.go:117] "RemoveContainer" containerID="831a344ee2cce4a0b951610a510e1658ce954756f3b2e5cd6a2828eb6436290a" Nov 26 15:08:00 crc kubenswrapper[5037]: E1126 15:08:00.244797 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"831a344ee2cce4a0b951610a510e1658ce954756f3b2e5cd6a2828eb6436290a\": container with ID starting with 831a344ee2cce4a0b951610a510e1658ce954756f3b2e5cd6a2828eb6436290a not found: ID does not exist" containerID="831a344ee2cce4a0b951610a510e1658ce954756f3b2e5cd6a2828eb6436290a" Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.244849 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"831a344ee2cce4a0b951610a510e1658ce954756f3b2e5cd6a2828eb6436290a"} err="failed to get container status \"831a344ee2cce4a0b951610a510e1658ce954756f3b2e5cd6a2828eb6436290a\": rpc error: code = NotFound desc = could not find container \"831a344ee2cce4a0b951610a510e1658ce954756f3b2e5cd6a2828eb6436290a\": container with ID starting with 831a344ee2cce4a0b951610a510e1658ce954756f3b2e5cd6a2828eb6436290a not found: ID does not exist" Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.244880 5037 scope.go:117] "RemoveContainer" containerID="9f3fbf8129ac8b0a62712c832686ce50854ec0b6a944b7b71e6ae0bca682ba20" Nov 26 15:08:00 crc kubenswrapper[5037]: E1126 15:08:00.245351 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f3fbf8129ac8b0a62712c832686ce50854ec0b6a944b7b71e6ae0bca682ba20\": container with ID starting with 9f3fbf8129ac8b0a62712c832686ce50854ec0b6a944b7b71e6ae0bca682ba20 not found: ID does not exist" containerID="9f3fbf8129ac8b0a62712c832686ce50854ec0b6a944b7b71e6ae0bca682ba20" 
Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.245383 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f3fbf8129ac8b0a62712c832686ce50854ec0b6a944b7b71e6ae0bca682ba20"} err="failed to get container status \"9f3fbf8129ac8b0a62712c832686ce50854ec0b6a944b7b71e6ae0bca682ba20\": rpc error: code = NotFound desc = could not find container \"9f3fbf8129ac8b0a62712c832686ce50854ec0b6a944b7b71e6ae0bca682ba20\": container with ID starting with 9f3fbf8129ac8b0a62712c832686ce50854ec0b6a944b7b71e6ae0bca682ba20 not found: ID does not exist"
Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.245405 5037 scope.go:117] "RemoveContainer" containerID="b0b21ccc630cf7d5951459d69bf4390f985f1f11e1a118b9092bf6f05b94fd8e"
Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.264386 5037 scope.go:117] "RemoveContainer" containerID="8891d2ef600faedf44de0cfc2fab9d086c016e19d3a1c5889056b3c6e35eb963"
Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.286897 5037 scope.go:117] "RemoveContainer" containerID="dcec3171947def875435e36182d9dd0e49e4dad531ebe97d5c6b35e73f296aa3"
Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.369078 5037 scope.go:117] "RemoveContainer" containerID="b0b21ccc630cf7d5951459d69bf4390f985f1f11e1a118b9092bf6f05b94fd8e"
Nov 26 15:08:00 crc kubenswrapper[5037]: E1126 15:08:00.369819 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0b21ccc630cf7d5951459d69bf4390f985f1f11e1a118b9092bf6f05b94fd8e\": container with ID starting with b0b21ccc630cf7d5951459d69bf4390f985f1f11e1a118b9092bf6f05b94fd8e not found: ID does not exist" containerID="b0b21ccc630cf7d5951459d69bf4390f985f1f11e1a118b9092bf6f05b94fd8e"
Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.369871 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0b21ccc630cf7d5951459d69bf4390f985f1f11e1a118b9092bf6f05b94fd8e"} err="failed to get container status \"b0b21ccc630cf7d5951459d69bf4390f985f1f11e1a118b9092bf6f05b94fd8e\": rpc error: code = NotFound desc = could not find container \"b0b21ccc630cf7d5951459d69bf4390f985f1f11e1a118b9092bf6f05b94fd8e\": container with ID starting with b0b21ccc630cf7d5951459d69bf4390f985f1f11e1a118b9092bf6f05b94fd8e not found: ID does not exist"
Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.369906 5037 scope.go:117] "RemoveContainer" containerID="8891d2ef600faedf44de0cfc2fab9d086c016e19d3a1c5889056b3c6e35eb963"
Nov 26 15:08:00 crc kubenswrapper[5037]: E1126 15:08:00.370472 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8891d2ef600faedf44de0cfc2fab9d086c016e19d3a1c5889056b3c6e35eb963\": container with ID starting with 8891d2ef600faedf44de0cfc2fab9d086c016e19d3a1c5889056b3c6e35eb963 not found: ID does not exist" containerID="8891d2ef600faedf44de0cfc2fab9d086c016e19d3a1c5889056b3c6e35eb963"
Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.370511 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8891d2ef600faedf44de0cfc2fab9d086c016e19d3a1c5889056b3c6e35eb963"} err="failed to get container status \"8891d2ef600faedf44de0cfc2fab9d086c016e19d3a1c5889056b3c6e35eb963\": rpc error: code = NotFound desc = could not find container \"8891d2ef600faedf44de0cfc2fab9d086c016e19d3a1c5889056b3c6e35eb963\": container with ID starting with 8891d2ef600faedf44de0cfc2fab9d086c016e19d3a1c5889056b3c6e35eb963 not found: ID does not exist"
Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.370537 5037 scope.go:117] "RemoveContainer" containerID="dcec3171947def875435e36182d9dd0e49e4dad531ebe97d5c6b35e73f296aa3"
Nov 26 15:08:00 crc kubenswrapper[5037]: E1126 15:08:00.371028 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcec3171947def875435e36182d9dd0e49e4dad531ebe97d5c6b35e73f296aa3\": container with ID starting with dcec3171947def875435e36182d9dd0e49e4dad531ebe97d5c6b35e73f296aa3 not found: ID does not exist" containerID="dcec3171947def875435e36182d9dd0e49e4dad531ebe97d5c6b35e73f296aa3"
Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.371066 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcec3171947def875435e36182d9dd0e49e4dad531ebe97d5c6b35e73f296aa3"} err="failed to get container status \"dcec3171947def875435e36182d9dd0e49e4dad531ebe97d5c6b35e73f296aa3\": rpc error: code = NotFound desc = could not find container \"dcec3171947def875435e36182d9dd0e49e4dad531ebe97d5c6b35e73f296aa3\": container with ID starting with dcec3171947def875435e36182d9dd0e49e4dad531ebe97d5c6b35e73f296aa3 not found: ID does not exist"
Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.906377 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d48f8419-3376-43b1-8177-51cdd23c8c18-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d48f8419-3376-43b1-8177-51cdd23c8c18" (UID: "d48f8419-3376-43b1-8177-51cdd23c8c18"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.952996 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48f8419-3376-43b1-8177-51cdd23c8c18-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 15:08:00 crc kubenswrapper[5037]: I1126 15:08:00.988958 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" (UID: "5ed64604-d7e4-4b38-af94-bbdc8fe13ec2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 15:08:01 crc kubenswrapper[5037]: I1126 15:08:01.054971 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 15:08:01 crc kubenswrapper[5037]: I1126 15:08:01.095097 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j8hw5"]
Nov 26 15:08:01 crc kubenswrapper[5037]: I1126 15:08:01.101342 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-j8hw5"]
Nov 26 15:08:01 crc kubenswrapper[5037]: I1126 15:08:01.107280 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jjfjj"]
Nov 26 15:08:01 crc kubenswrapper[5037]: I1126 15:08:01.112751 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jjfjj"]
Nov 26 15:08:01 crc kubenswrapper[5037]: I1126 15:08:01.922344 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" path="/var/lib/kubelet/pods/5ed64604-d7e4-4b38-af94-bbdc8fe13ec2/volumes"
Nov 26 15:08:01 crc kubenswrapper[5037]: I1126 15:08:01.923044 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d48f8419-3376-43b1-8177-51cdd23c8c18" path="/var/lib/kubelet/pods/d48f8419-3376-43b1-8177-51cdd23c8c18/volumes"
Nov 26 15:08:07 crc kubenswrapper[5037]: I1126 15:08:07.909130 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:08:07 crc kubenswrapper[5037]: E1126 15:08:07.910190 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:08:20 crc kubenswrapper[5037]: I1126 15:08:20.909143 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:08:20 crc kubenswrapper[5037]: E1126 15:08:20.909870 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:08:35 crc kubenswrapper[5037]: I1126 15:08:35.908788 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:08:35 crc kubenswrapper[5037]: E1126 15:08:35.909778 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:08:50 crc kubenswrapper[5037]: I1126 15:08:50.909358 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:08:51 crc kubenswrapper[5037]: I1126 15:08:51.601149 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"0e20b02fa213c98046037946a51795fa186938aa52bb32cb8338a014f35c8f29"}
Nov 26 15:11:11 crc kubenswrapper[5037]: I1126 15:11:11.247867 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:11:11 crc kubenswrapper[5037]: I1126 15:11:11.248616 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:11:41 crc kubenswrapper[5037]: I1126 15:11:41.247880 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:11:41 crc kubenswrapper[5037]: I1126 15:11:41.248523 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:12:11 crc kubenswrapper[5037]: I1126 15:12:11.247854 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:12:11 crc kubenswrapper[5037]: I1126 15:12:11.248614 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:12:11 crc kubenswrapper[5037]: I1126 15:12:11.248708 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d"
Nov 26 15:12:11 crc kubenswrapper[5037]: I1126 15:12:11.250121 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0e20b02fa213c98046037946a51795fa186938aa52bb32cb8338a014f35c8f29"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 15:12:11 crc kubenswrapper[5037]: I1126 15:12:11.250238 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://0e20b02fa213c98046037946a51795fa186938aa52bb32cb8338a014f35c8f29" gracePeriod=600
Nov 26 15:12:11 crc kubenswrapper[5037]: I1126 15:12:11.528322 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="0e20b02fa213c98046037946a51795fa186938aa52bb32cb8338a014f35c8f29" exitCode=0
Nov 26 15:12:11 crc kubenswrapper[5037]: I1126 15:12:11.528462 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"0e20b02fa213c98046037946a51795fa186938aa52bb32cb8338a014f35c8f29"}
Nov 26 15:12:11 crc kubenswrapper[5037]: I1126 15:12:11.528653 5037 scope.go:117] "RemoveContainer" containerID="c17fa9163e25f42ca62fe36b2c3a9409a3069b6f71f2694fb63cb4b9446e2de1"
Nov 26 15:12:12 crc kubenswrapper[5037]: I1126 15:12:12.538400 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"}
Nov 26 15:14:11 crc kubenswrapper[5037]: I1126 15:14:11.247571 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:14:11 crc kubenswrapper[5037]: I1126 15:14:11.248513 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:14:41 crc kubenswrapper[5037]: I1126 15:14:41.247080 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:14:41 crc kubenswrapper[5037]: I1126 15:14:41.248087 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.148729 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"]
Nov 26 15:15:00 crc kubenswrapper[5037]: E1126 15:15:00.149590 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d48f8419-3376-43b1-8177-51cdd23c8c18" containerName="extract-content"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.149608 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="d48f8419-3376-43b1-8177-51cdd23c8c18" containerName="extract-content"
Nov 26 15:15:00 crc kubenswrapper[5037]: E1126 15:15:00.149620 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" containerName="extract-content"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.149627 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" containerName="extract-content"
Nov 26 15:15:00 crc kubenswrapper[5037]: E1126 15:15:00.149637 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d48f8419-3376-43b1-8177-51cdd23c8c18" containerName="extract-utilities"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.149645 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="d48f8419-3376-43b1-8177-51cdd23c8c18" containerName="extract-utilities"
Nov 26 15:15:00 crc kubenswrapper[5037]: E1126 15:15:00.149658 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" containerName="extract-utilities"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.149665 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" containerName="extract-utilities"
Nov 26 15:15:00 crc kubenswrapper[5037]: E1126 15:15:00.149684 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" containerName="registry-server"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.149691 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" containerName="registry-server"
Nov 26 15:15:00 crc kubenswrapper[5037]: E1126 15:15:00.149712 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d48f8419-3376-43b1-8177-51cdd23c8c18" containerName="registry-server"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.149718 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="d48f8419-3376-43b1-8177-51cdd23c8c18" containerName="registry-server"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.149879 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="d48f8419-3376-43b1-8177-51cdd23c8c18" containerName="registry-server"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.149897 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ed64604-d7e4-4b38-af94-bbdc8fe13ec2" containerName="registry-server"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.150495 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.153379 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.153460 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.160451 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"]
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.243143 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cn2z\" (UniqueName: \"kubernetes.io/projected/05dd7116-9e90-4e23-af42-af2bd75d9808-kube-api-access-8cn2z\") pod \"collect-profiles-29402835-ds26z\" (UID: \"05dd7116-9e90-4e23-af42-af2bd75d9808\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.243205 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05dd7116-9e90-4e23-af42-af2bd75d9808-config-volume\") pod \"collect-profiles-29402835-ds26z\" (UID: \"05dd7116-9e90-4e23-af42-af2bd75d9808\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.243257 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05dd7116-9e90-4e23-af42-af2bd75d9808-secret-volume\") pod \"collect-profiles-29402835-ds26z\" (UID: \"05dd7116-9e90-4e23-af42-af2bd75d9808\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.344567 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05dd7116-9e90-4e23-af42-af2bd75d9808-config-volume\") pod \"collect-profiles-29402835-ds26z\" (UID: \"05dd7116-9e90-4e23-af42-af2bd75d9808\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.344647 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05dd7116-9e90-4e23-af42-af2bd75d9808-secret-volume\") pod \"collect-profiles-29402835-ds26z\" (UID: \"05dd7116-9e90-4e23-af42-af2bd75d9808\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.344730 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cn2z\" (UniqueName: \"kubernetes.io/projected/05dd7116-9e90-4e23-af42-af2bd75d9808-kube-api-access-8cn2z\") pod \"collect-profiles-29402835-ds26z\" (UID: \"05dd7116-9e90-4e23-af42-af2bd75d9808\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.345449 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05dd7116-9e90-4e23-af42-af2bd75d9808-config-volume\") pod \"collect-profiles-29402835-ds26z\" (UID: \"05dd7116-9e90-4e23-af42-af2bd75d9808\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.353918 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05dd7116-9e90-4e23-af42-af2bd75d9808-secret-volume\") pod \"collect-profiles-29402835-ds26z\" (UID: \"05dd7116-9e90-4e23-af42-af2bd75d9808\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.363350 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cn2z\" (UniqueName: \"kubernetes.io/projected/05dd7116-9e90-4e23-af42-af2bd75d9808-kube-api-access-8cn2z\") pod \"collect-profiles-29402835-ds26z\" (UID: \"05dd7116-9e90-4e23-af42-af2bd75d9808\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"
Nov 26 15:15:00 crc kubenswrapper[5037]: I1126 15:15:00.500866 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"
Nov 26 15:15:01 crc kubenswrapper[5037]: I1126 15:15:01.002330 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"]
Nov 26 15:15:01 crc kubenswrapper[5037]: I1126 15:15:01.112029 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z" event={"ID":"05dd7116-9e90-4e23-af42-af2bd75d9808","Type":"ContainerStarted","Data":"93ce7d9a0ff8bac94dbd365aee8d96400247a865415aa65260d63927a4694ba0"}
Nov 26 15:15:02 crc kubenswrapper[5037]: I1126 15:15:02.122193 5037 generic.go:334] "Generic (PLEG): container finished" podID="05dd7116-9e90-4e23-af42-af2bd75d9808" containerID="8570fcbdeb0c94320466bf0cd4458c08888a2df129b01103e9f481f79cbfecb5" exitCode=0
Nov 26 15:15:02 crc kubenswrapper[5037]: I1126 15:15:02.122352 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z" event={"ID":"05dd7116-9e90-4e23-af42-af2bd75d9808","Type":"ContainerDied","Data":"8570fcbdeb0c94320466bf0cd4458c08888a2df129b01103e9f481f79cbfecb5"}
Nov 26 15:15:03 crc kubenswrapper[5037]: I1126 15:15:03.446829 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"
Nov 26 15:15:03 crc kubenswrapper[5037]: I1126 15:15:03.591233 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05dd7116-9e90-4e23-af42-af2bd75d9808-secret-volume\") pod \"05dd7116-9e90-4e23-af42-af2bd75d9808\" (UID: \"05dd7116-9e90-4e23-af42-af2bd75d9808\") "
Nov 26 15:15:03 crc kubenswrapper[5037]: I1126 15:15:03.591379 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05dd7116-9e90-4e23-af42-af2bd75d9808-config-volume\") pod \"05dd7116-9e90-4e23-af42-af2bd75d9808\" (UID: \"05dd7116-9e90-4e23-af42-af2bd75d9808\") "
Nov 26 15:15:03 crc kubenswrapper[5037]: I1126 15:15:03.591470 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cn2z\" (UniqueName: \"kubernetes.io/projected/05dd7116-9e90-4e23-af42-af2bd75d9808-kube-api-access-8cn2z\") pod \"05dd7116-9e90-4e23-af42-af2bd75d9808\" (UID: \"05dd7116-9e90-4e23-af42-af2bd75d9808\") "
Nov 26 15:15:03 crc kubenswrapper[5037]: I1126 15:15:03.592024 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05dd7116-9e90-4e23-af42-af2bd75d9808-config-volume" (OuterVolumeSpecName: "config-volume") pod "05dd7116-9e90-4e23-af42-af2bd75d9808" (UID: "05dd7116-9e90-4e23-af42-af2bd75d9808"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 15:15:03 crc kubenswrapper[5037]: I1126 15:15:03.598848 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05dd7116-9e90-4e23-af42-af2bd75d9808-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "05dd7116-9e90-4e23-af42-af2bd75d9808" (UID: "05dd7116-9e90-4e23-af42-af2bd75d9808"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 15:15:03 crc kubenswrapper[5037]: I1126 15:15:03.600673 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05dd7116-9e90-4e23-af42-af2bd75d9808-kube-api-access-8cn2z" (OuterVolumeSpecName: "kube-api-access-8cn2z") pod "05dd7116-9e90-4e23-af42-af2bd75d9808" (UID: "05dd7116-9e90-4e23-af42-af2bd75d9808"). InnerVolumeSpecName "kube-api-access-8cn2z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 15:15:03 crc kubenswrapper[5037]: I1126 15:15:03.693621 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cn2z\" (UniqueName: \"kubernetes.io/projected/05dd7116-9e90-4e23-af42-af2bd75d9808-kube-api-access-8cn2z\") on node \"crc\" DevicePath \"\""
Nov 26 15:15:03 crc kubenswrapper[5037]: I1126 15:15:03.693680 5037 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/05dd7116-9e90-4e23-af42-af2bd75d9808-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 26 15:15:03 crc kubenswrapper[5037]: I1126 15:15:03.693697 5037 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/05dd7116-9e90-4e23-af42-af2bd75d9808-config-volume\") on node \"crc\" DevicePath \"\""
Nov 26 15:15:04 crc kubenswrapper[5037]: I1126 15:15:04.140438 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z" event={"ID":"05dd7116-9e90-4e23-af42-af2bd75d9808","Type":"ContainerDied","Data":"93ce7d9a0ff8bac94dbd365aee8d96400247a865415aa65260d63927a4694ba0"}
Nov 26 15:15:04 crc kubenswrapper[5037]: I1126 15:15:04.140496 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93ce7d9a0ff8bac94dbd365aee8d96400247a865415aa65260d63927a4694ba0"
Nov 26 15:15:04 crc kubenswrapper[5037]: I1126 15:15:04.140523 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-ds26z"
Nov 26 15:15:04 crc kubenswrapper[5037]: I1126 15:15:04.562014 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"]
Nov 26 15:15:04 crc kubenswrapper[5037]: I1126 15:15:04.570103 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402790-rrd4r"]
Nov 26 15:15:05 crc kubenswrapper[5037]: I1126 15:15:05.922806 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bed440b8-34d8-4d85-8bb3-b682c60cbdfd" path="/var/lib/kubelet/pods/bed440b8-34d8-4d85-8bb3-b682c60cbdfd/volumes"
Nov 26 15:15:11 crc kubenswrapper[5037]: I1126 15:15:11.247561 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:15:11 crc kubenswrapper[5037]: I1126 15:15:11.248203 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:15:11 crc kubenswrapper[5037]: I1126 15:15:11.248403 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d"
Nov 26 15:15:11 crc kubenswrapper[5037]: I1126 15:15:11.249816 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 15:15:11 crc kubenswrapper[5037]: I1126 15:15:11.249926 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f" gracePeriod=600
Nov 26 15:15:11 crc kubenswrapper[5037]: E1126 15:15:11.436042 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:15:12 crc kubenswrapper[5037]: I1126 15:15:12.209043 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f" exitCode=0
Nov 26 15:15:12 crc kubenswrapper[5037]: I1126 15:15:12.209411 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"}
Nov 26 15:15:12 crc kubenswrapper[5037]: I1126 15:15:12.209452 5037 scope.go:117] "RemoveContainer" containerID="0e20b02fa213c98046037946a51795fa186938aa52bb32cb8338a014f35c8f29"
Nov 26 15:15:12 crc kubenswrapper[5037]: I1126 15:15:12.210123 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:15:12 crc kubenswrapper[5037]: E1126 15:15:12.210453 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:15:12 crc kubenswrapper[5037]: I1126 15:15:12.867272 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-t8bgn"]
Nov 26 15:15:12 crc kubenswrapper[5037]: E1126 15:15:12.867652 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05dd7116-9e90-4e23-af42-af2bd75d9808" containerName="collect-profiles"
Nov 26 15:15:12 crc kubenswrapper[5037]: I1126 15:15:12.867667 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="05dd7116-9e90-4e23-af42-af2bd75d9808" containerName="collect-profiles"
Nov 26 15:15:12 crc kubenswrapper[5037]: I1126 15:15:12.867864 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="05dd7116-9e90-4e23-af42-af2bd75d9808" containerName="collect-profiles"
Nov 26 15:15:12 crc kubenswrapper[5037]: I1126 15:15:12.869135 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:12 crc kubenswrapper[5037]: I1126 15:15:12.888320 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t8bgn"]
Nov 26 15:15:13 crc kubenswrapper[5037]: I1126 15:15:13.039483 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-catalog-content\") pod \"redhat-operators-t8bgn\" (UID: \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\") " pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:13 crc kubenswrapper[5037]: I1126 15:15:13.039580 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7rtv\" (UniqueName: \"kubernetes.io/projected/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-kube-api-access-c7rtv\") pod \"redhat-operators-t8bgn\" (UID: \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\") " pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:13 crc kubenswrapper[5037]: I1126 15:15:13.039799 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-utilities\") pod \"redhat-operators-t8bgn\" (UID: \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\") " pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:13 crc kubenswrapper[5037]: I1126 15:15:13.141051 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-utilities\") pod \"redhat-operators-t8bgn\" (UID: \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\") " pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:13 crc kubenswrapper[5037]: I1126 15:15:13.141685 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-catalog-content\") pod \"redhat-operators-t8bgn\" (UID: \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\") " pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:13 crc kubenswrapper[5037]: I1126 15:15:13.141619 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-utilities\") pod \"redhat-operators-t8bgn\" (UID: \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\") " pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:13 crc kubenswrapper[5037]: I1126 15:15:13.141765 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7rtv\" (UniqueName: \"kubernetes.io/projected/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-kube-api-access-c7rtv\") pod \"redhat-operators-t8bgn\" (UID: \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\") " pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:13 crc kubenswrapper[5037]: I1126 15:15:13.142325 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-catalog-content\") pod \"redhat-operators-t8bgn\" (UID: \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\") " pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:13 crc kubenswrapper[5037]: I1126 15:15:13.164279 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7rtv\" (UniqueName: \"kubernetes.io/projected/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-kube-api-access-c7rtv\") pod \"redhat-operators-t8bgn\" (UID: \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\") " pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:13 crc kubenswrapper[5037]: I1126 15:15:13.241199 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:13 crc kubenswrapper[5037]: I1126 15:15:13.445492 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t8bgn"]
Nov 26 15:15:14 crc kubenswrapper[5037]: I1126 15:15:14.227323 5037 generic.go:334] "Generic (PLEG): container finished" podID="e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" containerID="b027312798fd3a861d2f93ae4d4dd8f9acc84ced3208ff7bb6c06e1d409eae04" exitCode=0
Nov 26 15:15:14 crc kubenswrapper[5037]: I1126 15:15:14.227580 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t8bgn" event={"ID":"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec","Type":"ContainerDied","Data":"b027312798fd3a861d2f93ae4d4dd8f9acc84ced3208ff7bb6c06e1d409eae04"}
Nov 26 15:15:14 crc kubenswrapper[5037]: I1126 15:15:14.227604 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t8bgn" event={"ID":"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec","Type":"ContainerStarted","Data":"806905bc64c90209e3b5abc55a4f125f38b434075222560647bf8b11541cd87b"}
Nov 26 15:15:14 crc kubenswrapper[5037]: I1126 15:15:14.229021 5037 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 15:15:16 crc kubenswrapper[5037]: I1126 15:15:16.248491 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t8bgn" event={"ID":"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec","Type":"ContainerStarted","Data":"3cd916c0b795e3a857a855a11d183499551c56c9b3d1f43488658f0e063ce445"}
Nov 26 15:15:17 crc kubenswrapper[5037]: I1126 15:15:17.263359 5037 generic.go:334] "Generic (PLEG): container finished" podID="e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" containerID="3cd916c0b795e3a857a855a11d183499551c56c9b3d1f43488658f0e063ce445" exitCode=0
Nov 26 15:15:17 crc kubenswrapper[5037]: I1126 15:15:17.263616 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t8bgn" event={"ID":"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec","Type":"ContainerDied","Data":"3cd916c0b795e3a857a855a11d183499551c56c9b3d1f43488658f0e063ce445"}
Nov 26 15:15:17 crc kubenswrapper[5037]: I1126 15:15:17.263868 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t8bgn" event={"ID":"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec","Type":"ContainerStarted","Data":"574c6a295d3fbcc43f940ce4ab54e5fe0cc323169357ee55077ec567a1347cd0"}
Nov 26 15:15:17 crc kubenswrapper[5037]: I1126 15:15:17.288393 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-t8bgn" podStartSLOduration=2.587632571 podStartE2EDuration="5.288374395s" podCreationTimestamp="2025-11-26 15:15:12 +0000 UTC" firstStartedPulling="2025-11-26 15:15:14.228828042 +0000 UTC m=+3581.025598226" lastFinishedPulling="2025-11-26 15:15:16.929569836 +0000 UTC m=+3583.726340050" observedRunningTime="2025-11-26 15:15:17.287279668 +0000 UTC m=+3584.084049862" watchObservedRunningTime="2025-11-26 15:15:17.288374395 +0000 UTC m=+3584.085144589"
Nov 26 15:15:23 crc kubenswrapper[5037]: I1126 15:15:23.242618 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:23 crc kubenswrapper[5037]: I1126 15:15:23.243032 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:24 crc kubenswrapper[5037]: I1126 15:15:24.300624 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-t8bgn" podUID="e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" containerName="registry-server" probeResult="failure" output=<
Nov 26 15:15:24 crc kubenswrapper[5037]: timeout: failed to connect service ":50051" within 1s
Nov 26 15:15:24 crc kubenswrapper[5037]: >
Nov 26 15:15:26 crc kubenswrapper[5037]: I1126 15:15:26.908991 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:15:26 crc kubenswrapper[5037]: E1126 15:15:26.911007 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:15:33 crc kubenswrapper[5037]: I1126 15:15:33.281062 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:33 crc kubenswrapper[5037]: I1126 15:15:33.324767 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:33 crc kubenswrapper[5037]: I1126 15:15:33.518075 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t8bgn"]
Nov 26 15:15:34 crc kubenswrapper[5037]: I1126 15:15:34.752914 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-t8bgn" podUID="e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" containerName="registry-server" containerID="cri-o://574c6a295d3fbcc43f940ce4ab54e5fe0cc323169357ee55077ec567a1347cd0" gracePeriod=2
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.250825 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.444827 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7rtv\" (UniqueName: \"kubernetes.io/projected/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-kube-api-access-c7rtv\") pod \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\" (UID: \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\") "
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.444922 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-catalog-content\") pod \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\" (UID: \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\") "
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.444950 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-utilities\") pod \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\" (UID: \"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec\") "
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.446177 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-utilities" (OuterVolumeSpecName: "utilities") pod "e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" (UID: "e20f81a6-3773-4cb5-9bd6-49a082c6a5ec"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.450276 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-kube-api-access-c7rtv" (OuterVolumeSpecName: "kube-api-access-c7rtv") pod "e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" (UID: "e20f81a6-3773-4cb5-9bd6-49a082c6a5ec"). InnerVolumeSpecName "kube-api-access-c7rtv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.535673 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" (UID: "e20f81a6-3773-4cb5-9bd6-49a082c6a5ec"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.547316 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7rtv\" (UniqueName: \"kubernetes.io/projected/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-kube-api-access-c7rtv\") on node \"crc\" DevicePath \"\""
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.547370 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.547389 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.762681 5037 generic.go:334] "Generic (PLEG): container finished" podID="e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" containerID="574c6a295d3fbcc43f940ce4ab54e5fe0cc323169357ee55077ec567a1347cd0" exitCode=0
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.762759 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t8bgn" event={"ID":"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec","Type":"ContainerDied","Data":"574c6a295d3fbcc43f940ce4ab54e5fe0cc323169357ee55077ec567a1347cd0"}
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.762990 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t8bgn" event={"ID":"e20f81a6-3773-4cb5-9bd6-49a082c6a5ec","Type":"ContainerDied","Data":"806905bc64c90209e3b5abc55a4f125f38b434075222560647bf8b11541cd87b"}
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.763016 5037 scope.go:117] "RemoveContainer" containerID="574c6a295d3fbcc43f940ce4ab54e5fe0cc323169357ee55077ec567a1347cd0"
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.762793 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t8bgn"
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.811465 5037 scope.go:117] "RemoveContainer" containerID="3cd916c0b795e3a857a855a11d183499551c56c9b3d1f43488658f0e063ce445"
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.812038 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t8bgn"]
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.818363 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-t8bgn"]
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.843950 5037 scope.go:117] "RemoveContainer" containerID="b027312798fd3a861d2f93ae4d4dd8f9acc84ced3208ff7bb6c06e1d409eae04"
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.871783 5037 scope.go:117] "RemoveContainer" containerID="574c6a295d3fbcc43f940ce4ab54e5fe0cc323169357ee55077ec567a1347cd0"
Nov 26 15:15:35 crc kubenswrapper[5037]: E1126 15:15:35.872838 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"574c6a295d3fbcc43f940ce4ab54e5fe0cc323169357ee55077ec567a1347cd0\": container with ID starting with 574c6a295d3fbcc43f940ce4ab54e5fe0cc323169357ee55077ec567a1347cd0 not found: ID does not exist" containerID="574c6a295d3fbcc43f940ce4ab54e5fe0cc323169357ee55077ec567a1347cd0"
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.872896 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"574c6a295d3fbcc43f940ce4ab54e5fe0cc323169357ee55077ec567a1347cd0"} err="failed to get container status \"574c6a295d3fbcc43f940ce4ab54e5fe0cc323169357ee55077ec567a1347cd0\": rpc error: code = NotFound desc = could not find container \"574c6a295d3fbcc43f940ce4ab54e5fe0cc323169357ee55077ec567a1347cd0\": container with ID starting with 574c6a295d3fbcc43f940ce4ab54e5fe0cc323169357ee55077ec567a1347cd0 not found: ID does not exist"
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.872931 5037 scope.go:117] "RemoveContainer" containerID="3cd916c0b795e3a857a855a11d183499551c56c9b3d1f43488658f0e063ce445"
Nov 26 15:15:35 crc kubenswrapper[5037]: E1126 15:15:35.873656 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cd916c0b795e3a857a855a11d183499551c56c9b3d1f43488658f0e063ce445\": container with ID starting with 3cd916c0b795e3a857a855a11d183499551c56c9b3d1f43488658f0e063ce445 not found: ID does not exist" containerID="3cd916c0b795e3a857a855a11d183499551c56c9b3d1f43488658f0e063ce445"
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.873758 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cd916c0b795e3a857a855a11d183499551c56c9b3d1f43488658f0e063ce445"} err="failed to get container status \"3cd916c0b795e3a857a855a11d183499551c56c9b3d1f43488658f0e063ce445\": rpc error: code = NotFound desc = could not find container \"3cd916c0b795e3a857a855a11d183499551c56c9b3d1f43488658f0e063ce445\": container with ID starting with 3cd916c0b795e3a857a855a11d183499551c56c9b3d1f43488658f0e063ce445 not found: ID does not exist"
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.873837 5037 scope.go:117] "RemoveContainer" containerID="b027312798fd3a861d2f93ae4d4dd8f9acc84ced3208ff7bb6c06e1d409eae04"
Nov 26 15:15:35 crc kubenswrapper[5037]: E1126 15:15:35.874342 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b027312798fd3a861d2f93ae4d4dd8f9acc84ced3208ff7bb6c06e1d409eae04\": container with ID starting with b027312798fd3a861d2f93ae4d4dd8f9acc84ced3208ff7bb6c06e1d409eae04 not found: ID does not exist" containerID="b027312798fd3a861d2f93ae4d4dd8f9acc84ced3208ff7bb6c06e1d409eae04"
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.874370 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b027312798fd3a861d2f93ae4d4dd8f9acc84ced3208ff7bb6c06e1d409eae04"} err="failed to get container status \"b027312798fd3a861d2f93ae4d4dd8f9acc84ced3208ff7bb6c06e1d409eae04\": rpc error: code = NotFound desc = could not find container \"b027312798fd3a861d2f93ae4d4dd8f9acc84ced3208ff7bb6c06e1d409eae04\": container with ID starting with b027312798fd3a861d2f93ae4d4dd8f9acc84ced3208ff7bb6c06e1d409eae04 not found: ID does not exist"
Nov 26 15:15:35 crc kubenswrapper[5037]: I1126 15:15:35.925158 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" path="/var/lib/kubelet/pods/e20f81a6-3773-4cb5-9bd6-49a082c6a5ec/volumes"
Nov 26 15:15:41 crc kubenswrapper[5037]: I1126 15:15:41.908601 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:15:41 crc kubenswrapper[5037]: E1126 15:15:41.909359 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:15:50 crc kubenswrapper[5037]: I1126 15:15:50.832711 5037 scope.go:117] "RemoveContainer" containerID="aa79fb2ceb2bfd00b0002638a4d8f0c4008de74f954f2d69de297cb3f6ec14f3"
Nov 26 15:15:52 crc kubenswrapper[5037]: I1126 15:15:52.912575 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:15:52 crc kubenswrapper[5037]: E1126 15:15:52.915189 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:16:03 crc kubenswrapper[5037]: I1126 15:16:03.912519 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:16:03 crc kubenswrapper[5037]: E1126 15:16:03.913135 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:16:15 crc kubenswrapper[5037]: I1126 15:16:15.908175 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:16:15 crc kubenswrapper[5037]: E1126 15:16:15.909411 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:16:29 crc kubenswrapper[5037]: I1126 15:16:29.908610 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:16:29 crc kubenswrapper[5037]: E1126 15:16:29.910053 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:16:40 crc kubenswrapper[5037]: I1126 15:16:40.908727 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:16:40 crc kubenswrapper[5037]: E1126 15:16:40.911009 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:16:55 crc kubenswrapper[5037]: I1126 15:16:55.908841 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:16:55 crc kubenswrapper[5037]: E1126 15:16:55.910093 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:17:09 crc kubenswrapper[5037]: I1126 15:17:09.908447 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:17:09 crc kubenswrapper[5037]: E1126 15:17:09.909679 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.493005 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xr7nc"]
Nov 26 15:17:12 crc kubenswrapper[5037]: E1126 15:17:12.493434 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" containerName="registry-server"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.493456 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" containerName="registry-server"
Nov 26 15:17:12 crc kubenswrapper[5037]: E1126 15:17:12.493487 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" containerName="extract-utilities"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.493500 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" containerName="extract-utilities"
Nov 26 15:17:12 crc kubenswrapper[5037]: E1126 15:17:12.493528 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" containerName="extract-content"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.493538 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" containerName="extract-content"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.493745 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="e20f81a6-3773-4cb5-9bd6-49a082c6a5ec" containerName="registry-server"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.495051 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.507514 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xr7nc"]
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.600603 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3f34f88-8a96-4518-9acc-7fa171229cf8-utilities\") pod \"redhat-marketplace-xr7nc\" (UID: \"f3f34f88-8a96-4518-9acc-7fa171229cf8\") " pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.600725 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3f34f88-8a96-4518-9acc-7fa171229cf8-catalog-content\") pod \"redhat-marketplace-xr7nc\" (UID: \"f3f34f88-8a96-4518-9acc-7fa171229cf8\") " pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.600753 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69c5x\" (UniqueName: \"kubernetes.io/projected/f3f34f88-8a96-4518-9acc-7fa171229cf8-kube-api-access-69c5x\") pod \"redhat-marketplace-xr7nc\" (UID: \"f3f34f88-8a96-4518-9acc-7fa171229cf8\") " pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.702050 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69c5x\" (UniqueName: \"kubernetes.io/projected/f3f34f88-8a96-4518-9acc-7fa171229cf8-kube-api-access-69c5x\") pod \"redhat-marketplace-xr7nc\" (UID: \"f3f34f88-8a96-4518-9acc-7fa171229cf8\") " pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.702181 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3f34f88-8a96-4518-9acc-7fa171229cf8-utilities\") pod \"redhat-marketplace-xr7nc\" (UID: \"f3f34f88-8a96-4518-9acc-7fa171229cf8\") " pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.702264 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3f34f88-8a96-4518-9acc-7fa171229cf8-catalog-content\") pod \"redhat-marketplace-xr7nc\" (UID: \"f3f34f88-8a96-4518-9acc-7fa171229cf8\") " pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.702949 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3f34f88-8a96-4518-9acc-7fa171229cf8-catalog-content\") pod \"redhat-marketplace-xr7nc\" (UID: \"f3f34f88-8a96-4518-9acc-7fa171229cf8\") " pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.702947 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3f34f88-8a96-4518-9acc-7fa171229cf8-utilities\") pod \"redhat-marketplace-xr7nc\" (UID: \"f3f34f88-8a96-4518-9acc-7fa171229cf8\") " pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.729338 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69c5x\" (UniqueName: \"kubernetes.io/projected/f3f34f88-8a96-4518-9acc-7fa171229cf8-kube-api-access-69c5x\") pod \"redhat-marketplace-xr7nc\" (UID: \"f3f34f88-8a96-4518-9acc-7fa171229cf8\") " pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:12 crc kubenswrapper[5037]: I1126 15:17:12.824884 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:13 crc kubenswrapper[5037]: I1126 15:17:13.055857 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xr7nc"]
Nov 26 15:17:13 crc kubenswrapper[5037]: I1126 15:17:13.659203 5037 generic.go:334] "Generic (PLEG): container finished" podID="f3f34f88-8a96-4518-9acc-7fa171229cf8" containerID="b3e5591d65893a38fb4015cb6ae2f08b156c595bdad872358db430831db00ea5" exitCode=0
Nov 26 15:17:13 crc kubenswrapper[5037]: I1126 15:17:13.659266 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xr7nc" event={"ID":"f3f34f88-8a96-4518-9acc-7fa171229cf8","Type":"ContainerDied","Data":"b3e5591d65893a38fb4015cb6ae2f08b156c595bdad872358db430831db00ea5"}
Nov 26 15:17:13 crc kubenswrapper[5037]: I1126 15:17:13.659329 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xr7nc" event={"ID":"f3f34f88-8a96-4518-9acc-7fa171229cf8","Type":"ContainerStarted","Data":"41f5eecc003eb21384ef6fdcf6a19c1e73bc62a0fa9c4913df9240f37c316f75"}
Nov 26 15:17:14 crc kubenswrapper[5037]: I1126 15:17:14.668935 5037 generic.go:334] "Generic (PLEG): container finished" podID="f3f34f88-8a96-4518-9acc-7fa171229cf8" containerID="588f30f25d606a498f0b1b247c7c3170288fefb8a0c5409a8d9784474046b52a" exitCode=0
Nov 26 15:17:14 crc kubenswrapper[5037]: I1126 15:17:14.669052 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xr7nc" event={"ID":"f3f34f88-8a96-4518-9acc-7fa171229cf8","Type":"ContainerDied","Data":"588f30f25d606a498f0b1b247c7c3170288fefb8a0c5409a8d9784474046b52a"}
Nov 26 15:17:15 crc kubenswrapper[5037]: I1126 15:17:15.686369 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xr7nc" event={"ID":"f3f34f88-8a96-4518-9acc-7fa171229cf8","Type":"ContainerStarted","Data":"e6af1b8ce190ec68db2c84f7d208a96b4bcf82397914c7a852516d59115fd6bf"}
Nov 26 15:17:15 crc kubenswrapper[5037]: I1126 15:17:15.715567 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xr7nc" podStartSLOduration=2.272286758 podStartE2EDuration="3.715514158s" podCreationTimestamp="2025-11-26 15:17:12 +0000 UTC" firstStartedPulling="2025-11-26 15:17:13.662161109 +0000 UTC m=+3700.458931323" lastFinishedPulling="2025-11-26 15:17:15.105388499 +0000 UTC m=+3701.902158723" observedRunningTime="2025-11-26 15:17:15.713798827 +0000 UTC m=+3702.510569021" watchObservedRunningTime="2025-11-26 15:17:15.715514158 +0000 UTC m=+3702.512284382"
Nov 26 15:17:22 crc kubenswrapper[5037]: I1126 15:17:22.825921 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:22 crc kubenswrapper[5037]: I1126 15:17:22.826925 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:22 crc kubenswrapper[5037]: I1126 15:17:22.903178 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:23 crc kubenswrapper[5037]: I1126 15:17:23.842425 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xr7nc"
Nov 26 15:17:23 crc kubenswrapper[5037]: I1126 15:17:23.895213 5037 kubelet.go:2437] "SyncLoop DELETE" source="api"
pods=["openshift-marketplace/redhat-marketplace-xr7nc"] Nov 26 15:17:24 crc kubenswrapper[5037]: I1126 15:17:24.908901 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f" Nov 26 15:17:24 crc kubenswrapper[5037]: E1126 15:17:24.909175 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:17:25 crc kubenswrapper[5037]: I1126 15:17:25.789325 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xr7nc" podUID="f3f34f88-8a96-4518-9acc-7fa171229cf8" containerName="registry-server" containerID="cri-o://e6af1b8ce190ec68db2c84f7d208a96b4bcf82397914c7a852516d59115fd6bf" gracePeriod=2 Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.267515 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xr7nc" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.327163 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69c5x\" (UniqueName: \"kubernetes.io/projected/f3f34f88-8a96-4518-9acc-7fa171229cf8-kube-api-access-69c5x\") pod \"f3f34f88-8a96-4518-9acc-7fa171229cf8\" (UID: \"f3f34f88-8a96-4518-9acc-7fa171229cf8\") " Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.327266 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3f34f88-8a96-4518-9acc-7fa171229cf8-catalog-content\") pod \"f3f34f88-8a96-4518-9acc-7fa171229cf8\" (UID: \"f3f34f88-8a96-4518-9acc-7fa171229cf8\") " Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.327337 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3f34f88-8a96-4518-9acc-7fa171229cf8-utilities\") pod \"f3f34f88-8a96-4518-9acc-7fa171229cf8\" (UID: \"f3f34f88-8a96-4518-9acc-7fa171229cf8\") " Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.328614 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3f34f88-8a96-4518-9acc-7fa171229cf8-utilities" (OuterVolumeSpecName: "utilities") pod "f3f34f88-8a96-4518-9acc-7fa171229cf8" (UID: "f3f34f88-8a96-4518-9acc-7fa171229cf8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.333907 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3f34f88-8a96-4518-9acc-7fa171229cf8-kube-api-access-69c5x" (OuterVolumeSpecName: "kube-api-access-69c5x") pod "f3f34f88-8a96-4518-9acc-7fa171229cf8" (UID: "f3f34f88-8a96-4518-9acc-7fa171229cf8"). InnerVolumeSpecName "kube-api-access-69c5x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.356834 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3f34f88-8a96-4518-9acc-7fa171229cf8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f3f34f88-8a96-4518-9acc-7fa171229cf8" (UID: "f3f34f88-8a96-4518-9acc-7fa171229cf8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.428498 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69c5x\" (UniqueName: \"kubernetes.io/projected/f3f34f88-8a96-4518-9acc-7fa171229cf8-kube-api-access-69c5x\") on node \"crc\" DevicePath \"\"" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.428552 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3f34f88-8a96-4518-9acc-7fa171229cf8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.428572 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3f34f88-8a96-4518-9acc-7fa171229cf8-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.800886 5037 generic.go:334] "Generic (PLEG): container finished" podID="f3f34f88-8a96-4518-9acc-7fa171229cf8" containerID="e6af1b8ce190ec68db2c84f7d208a96b4bcf82397914c7a852516d59115fd6bf" exitCode=0 Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.800934 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xr7nc" event={"ID":"f3f34f88-8a96-4518-9acc-7fa171229cf8","Type":"ContainerDied","Data":"e6af1b8ce190ec68db2c84f7d208a96b4bcf82397914c7a852516d59115fd6bf"} Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.800965 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xr7nc" event={"ID":"f3f34f88-8a96-4518-9acc-7fa171229cf8","Type":"ContainerDied","Data":"41f5eecc003eb21384ef6fdcf6a19c1e73bc62a0fa9c4913df9240f37c316f75"} Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.800962 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xr7nc" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.801007 5037 scope.go:117] "RemoveContainer" containerID="e6af1b8ce190ec68db2c84f7d208a96b4bcf82397914c7a852516d59115fd6bf" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.854257 5037 scope.go:117] "RemoveContainer" containerID="588f30f25d606a498f0b1b247c7c3170288fefb8a0c5409a8d9784474046b52a" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.857988 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xr7nc"] Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.879412 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xr7nc"] Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.880966 5037 scope.go:117] "RemoveContainer" containerID="b3e5591d65893a38fb4015cb6ae2f08b156c595bdad872358db430831db00ea5" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.929584 5037 scope.go:117] "RemoveContainer" containerID="e6af1b8ce190ec68db2c84f7d208a96b4bcf82397914c7a852516d59115fd6bf" Nov 26 15:17:26 crc kubenswrapper[5037]: E1126 15:17:26.930451 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6af1b8ce190ec68db2c84f7d208a96b4bcf82397914c7a852516d59115fd6bf\": container with ID starting with e6af1b8ce190ec68db2c84f7d208a96b4bcf82397914c7a852516d59115fd6bf not found: ID does not exist" containerID="e6af1b8ce190ec68db2c84f7d208a96b4bcf82397914c7a852516d59115fd6bf" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.930800 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6af1b8ce190ec68db2c84f7d208a96b4bcf82397914c7a852516d59115fd6bf"} err="failed to get container status \"e6af1b8ce190ec68db2c84f7d208a96b4bcf82397914c7a852516d59115fd6bf\": rpc error: code = NotFound desc = could not find container \"e6af1b8ce190ec68db2c84f7d208a96b4bcf82397914c7a852516d59115fd6bf\": container with ID starting with e6af1b8ce190ec68db2c84f7d208a96b4bcf82397914c7a852516d59115fd6bf not found: ID does not exist" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.930897 5037 scope.go:117] "RemoveContainer" containerID="588f30f25d606a498f0b1b247c7c3170288fefb8a0c5409a8d9784474046b52a" Nov 26 15:17:26 crc kubenswrapper[5037]: E1126 15:17:26.931766 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"588f30f25d606a498f0b1b247c7c3170288fefb8a0c5409a8d9784474046b52a\": container with ID starting with 588f30f25d606a498f0b1b247c7c3170288fefb8a0c5409a8d9784474046b52a not found: ID does not exist" containerID="588f30f25d606a498f0b1b247c7c3170288fefb8a0c5409a8d9784474046b52a" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.931827 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"588f30f25d606a498f0b1b247c7c3170288fefb8a0c5409a8d9784474046b52a"} err="failed to get container status \"588f30f25d606a498f0b1b247c7c3170288fefb8a0c5409a8d9784474046b52a\": rpc error: code = NotFound desc = could not find container \"588f30f25d606a498f0b1b247c7c3170288fefb8a0c5409a8d9784474046b52a\": container with ID starting with 588f30f25d606a498f0b1b247c7c3170288fefb8a0c5409a8d9784474046b52a not found: ID does not exist" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.931865 5037 scope.go:117] "RemoveContainer" 
containerID="b3e5591d65893a38fb4015cb6ae2f08b156c595bdad872358db430831db00ea5" Nov 26 15:17:26 crc kubenswrapper[5037]: E1126 15:17:26.932405 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3e5591d65893a38fb4015cb6ae2f08b156c595bdad872358db430831db00ea5\": container with ID starting with b3e5591d65893a38fb4015cb6ae2f08b156c595bdad872358db430831db00ea5 not found: ID does not exist" containerID="b3e5591d65893a38fb4015cb6ae2f08b156c595bdad872358db430831db00ea5" Nov 26 15:17:26 crc kubenswrapper[5037]: I1126 15:17:26.932511 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3e5591d65893a38fb4015cb6ae2f08b156c595bdad872358db430831db00ea5"} err="failed to get container status \"b3e5591d65893a38fb4015cb6ae2f08b156c595bdad872358db430831db00ea5\": rpc error: code = NotFound desc = could not find container \"b3e5591d65893a38fb4015cb6ae2f08b156c595bdad872358db430831db00ea5\": container with ID starting with b3e5591d65893a38fb4015cb6ae2f08b156c595bdad872358db430831db00ea5 not found: ID does not exist" Nov 26 15:17:27 crc kubenswrapper[5037]: I1126 15:17:27.923592 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3f34f88-8a96-4518-9acc-7fa171229cf8" path="/var/lib/kubelet/pods/f3f34f88-8a96-4518-9acc-7fa171229cf8/volumes" Nov 26 15:17:37 crc kubenswrapper[5037]: I1126 15:17:37.908218 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f" Nov 26 15:17:37 crc kubenswrapper[5037]: E1126 15:17:37.909143 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.186082 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rfv8m"] Nov 26 15:17:48 crc kubenswrapper[5037]: E1126 15:17:48.187379 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3f34f88-8a96-4518-9acc-7fa171229cf8" containerName="extract-content" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.187404 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3f34f88-8a96-4518-9acc-7fa171229cf8" containerName="extract-content" Nov 26 15:17:48 crc kubenswrapper[5037]: E1126 15:17:48.187474 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3f34f88-8a96-4518-9acc-7fa171229cf8" containerName="extract-utilities" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.187488 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3f34f88-8a96-4518-9acc-7fa171229cf8" containerName="extract-utilities" Nov 26 15:17:48 crc kubenswrapper[5037]: E1126 15:17:48.187509 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3f34f88-8a96-4518-9acc-7fa171229cf8" containerName="registry-server" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.187527 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3f34f88-8a96-4518-9acc-7fa171229cf8" containerName="registry-server" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.187797 5037 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="f3f34f88-8a96-4518-9acc-7fa171229cf8" containerName="registry-server" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.189722 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.208120 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rfv8m"] Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.335271 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjlrh\" (UniqueName: \"kubernetes.io/projected/469e3981-b529-4082-b10d-8b4442a7e7e4-kube-api-access-gjlrh\") pod \"community-operators-rfv8m\" (UID: \"469e3981-b529-4082-b10d-8b4442a7e7e4\") " pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.335352 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/469e3981-b529-4082-b10d-8b4442a7e7e4-catalog-content\") pod \"community-operators-rfv8m\" (UID: \"469e3981-b529-4082-b10d-8b4442a7e7e4\") " pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.335542 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/469e3981-b529-4082-b10d-8b4442a7e7e4-utilities\") pod \"community-operators-rfv8m\" (UID: \"469e3981-b529-4082-b10d-8b4442a7e7e4\") " pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.436904 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjlrh\" (UniqueName: \"kubernetes.io/projected/469e3981-b529-4082-b10d-8b4442a7e7e4-kube-api-access-gjlrh\") pod \"community-operators-rfv8m\" (UID: \"469e3981-b529-4082-b10d-8b4442a7e7e4\") " pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.436971 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/469e3981-b529-4082-b10d-8b4442a7e7e4-catalog-content\") pod \"community-operators-rfv8m\" (UID: \"469e3981-b529-4082-b10d-8b4442a7e7e4\") " pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.437014 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/469e3981-b529-4082-b10d-8b4442a7e7e4-utilities\") pod \"community-operators-rfv8m\" (UID: \"469e3981-b529-4082-b10d-8b4442a7e7e4\") " pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.437602 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/469e3981-b529-4082-b10d-8b4442a7e7e4-utilities\") pod \"community-operators-rfv8m\" (UID: \"469e3981-b529-4082-b10d-8b4442a7e7e4\") " pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.437664 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/469e3981-b529-4082-b10d-8b4442a7e7e4-catalog-content\") pod \"community-operators-rfv8m\" (UID: 
\"469e3981-b529-4082-b10d-8b4442a7e7e4\") " pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.457769 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjlrh\" (UniqueName: \"kubernetes.io/projected/469e3981-b529-4082-b10d-8b4442a7e7e4-kube-api-access-gjlrh\") pod \"community-operators-rfv8m\" (UID: \"469e3981-b529-4082-b10d-8b4442a7e7e4\") " pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:48 crc kubenswrapper[5037]: I1126 15:17:48.524253 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:49 crc kubenswrapper[5037]: I1126 15:17:49.009222 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rfv8m"] Nov 26 15:17:50 crc kubenswrapper[5037]: I1126 15:17:50.026959 5037 generic.go:334] "Generic (PLEG): container finished" podID="469e3981-b529-4082-b10d-8b4442a7e7e4" containerID="51071bc07fa6dba045894d85403f13de4f0d7caf32ddbdc6612eda353fc58858" exitCode=0 Nov 26 15:17:50 crc kubenswrapper[5037]: I1126 15:17:50.027109 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfv8m" event={"ID":"469e3981-b529-4082-b10d-8b4442a7e7e4","Type":"ContainerDied","Data":"51071bc07fa6dba045894d85403f13de4f0d7caf32ddbdc6612eda353fc58858"} Nov 26 15:17:50 crc kubenswrapper[5037]: I1126 15:17:50.027488 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfv8m" event={"ID":"469e3981-b529-4082-b10d-8b4442a7e7e4","Type":"ContainerStarted","Data":"2c858189d26e44439c92fa203e87e3a1b41ffd5fdab8e9e31460c4b76b470073"} Nov 26 15:17:50 crc kubenswrapper[5037]: I1126 15:17:50.908465 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f" Nov 26 15:17:50 crc kubenswrapper[5037]: E1126 15:17:50.908982 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:17:54 crc kubenswrapper[5037]: I1126 15:17:54.068206 5037 generic.go:334] "Generic (PLEG): container finished" podID="469e3981-b529-4082-b10d-8b4442a7e7e4" containerID="abcb60056c4732844e9b6bed082e6e241c9137e5e5c12ea47a145e79363b14de" exitCode=0 Nov 26 15:17:54 crc kubenswrapper[5037]: I1126 15:17:54.068319 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfv8m" event={"ID":"469e3981-b529-4082-b10d-8b4442a7e7e4","Type":"ContainerDied","Data":"abcb60056c4732844e9b6bed082e6e241c9137e5e5c12ea47a145e79363b14de"} Nov 26 15:17:55 crc kubenswrapper[5037]: I1126 15:17:55.080792 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfv8m" event={"ID":"469e3981-b529-4082-b10d-8b4442a7e7e4","Type":"ContainerStarted","Data":"94130e94669db9e2d7752ec92489a5b2254dc41ff2043e5ea13619e1b6a76ff0"} Nov 26 15:17:55 crc kubenswrapper[5037]: I1126 15:17:55.113486 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rfv8m" 
podStartSLOduration=2.615636366 podStartE2EDuration="7.11345159s" podCreationTimestamp="2025-11-26 15:17:48 +0000 UTC" firstStartedPulling="2025-11-26 15:17:50.031674564 +0000 UTC m=+3736.828444778" lastFinishedPulling="2025-11-26 15:17:54.529489818 +0000 UTC m=+3741.326260002" observedRunningTime="2025-11-26 15:17:55.104917956 +0000 UTC m=+3741.901688140" watchObservedRunningTime="2025-11-26 15:17:55.11345159 +0000 UTC m=+3741.910221814" Nov 26 15:17:58 crc kubenswrapper[5037]: I1126 15:17:58.525349 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:58 crc kubenswrapper[5037]: I1126 15:17:58.525711 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:58 crc kubenswrapper[5037]: I1126 15:17:58.587759 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:59 crc kubenswrapper[5037]: I1126 15:17:59.164398 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rfv8m" Nov 26 15:17:59 crc kubenswrapper[5037]: I1126 15:17:59.249289 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rfv8m"] Nov 26 15:17:59 crc kubenswrapper[5037]: I1126 15:17:59.312062 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-llzzn"] Nov 26 15:17:59 crc kubenswrapper[5037]: I1126 15:17:59.312421 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-llzzn" podUID="69c81dd8-253e-4f54-bf2d-68828864c7cd" containerName="registry-server" containerID="cri-o://230334f503e4f60713b8c05154fd1c3ed5a236985f7f9e4b63e9dbcb1497f54d" gracePeriod=2 Nov 26 15:18:00 crc kubenswrapper[5037]: I1126 15:18:00.142131 5037 generic.go:334] "Generic (PLEG): container finished" podID="69c81dd8-253e-4f54-bf2d-68828864c7cd" containerID="230334f503e4f60713b8c05154fd1c3ed5a236985f7f9e4b63e9dbcb1497f54d" exitCode=0 Nov 26 15:18:00 crc kubenswrapper[5037]: I1126 15:18:00.142241 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llzzn" event={"ID":"69c81dd8-253e-4f54-bf2d-68828864c7cd","Type":"ContainerDied","Data":"230334f503e4f60713b8c05154fd1c3ed5a236985f7f9e4b63e9dbcb1497f54d"} Nov 26 15:18:00 crc kubenswrapper[5037]: I1126 15:18:00.201741 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-llzzn" Nov 26 15:18:00 crc kubenswrapper[5037]: I1126 15:18:00.334818 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69c81dd8-253e-4f54-bf2d-68828864c7cd-catalog-content\") pod \"69c81dd8-253e-4f54-bf2d-68828864c7cd\" (UID: \"69c81dd8-253e-4f54-bf2d-68828864c7cd\") " Nov 26 15:18:00 crc kubenswrapper[5037]: I1126 15:18:00.334869 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2kfn\" (UniqueName: \"kubernetes.io/projected/69c81dd8-253e-4f54-bf2d-68828864c7cd-kube-api-access-f2kfn\") pod \"69c81dd8-253e-4f54-bf2d-68828864c7cd\" (UID: \"69c81dd8-253e-4f54-bf2d-68828864c7cd\") " Nov 26 15:18:00 crc kubenswrapper[5037]: I1126 15:18:00.334949 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69c81dd8-253e-4f54-bf2d-68828864c7cd-utilities\") pod \"69c81dd8-253e-4f54-bf2d-68828864c7cd\" (UID: \"69c81dd8-253e-4f54-bf2d-68828864c7cd\") " Nov 26 15:18:00 crc kubenswrapper[5037]: I1126 15:18:00.335968 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69c81dd8-253e-4f54-bf2d-68828864c7cd-utilities" (OuterVolumeSpecName: "utilities") pod "69c81dd8-253e-4f54-bf2d-68828864c7cd" (UID: "69c81dd8-253e-4f54-bf2d-68828864c7cd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:18:00 crc kubenswrapper[5037]: I1126 15:18:00.339625 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69c81dd8-253e-4f54-bf2d-68828864c7cd-kube-api-access-f2kfn" (OuterVolumeSpecName: "kube-api-access-f2kfn") pod "69c81dd8-253e-4f54-bf2d-68828864c7cd" (UID: "69c81dd8-253e-4f54-bf2d-68828864c7cd"). InnerVolumeSpecName "kube-api-access-f2kfn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:18:00 crc kubenswrapper[5037]: I1126 15:18:00.383460 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69c81dd8-253e-4f54-bf2d-68828864c7cd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "69c81dd8-253e-4f54-bf2d-68828864c7cd" (UID: "69c81dd8-253e-4f54-bf2d-68828864c7cd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:18:00 crc kubenswrapper[5037]: I1126 15:18:00.436802 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69c81dd8-253e-4f54-bf2d-68828864c7cd-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:18:00 crc kubenswrapper[5037]: I1126 15:18:00.436835 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69c81dd8-253e-4f54-bf2d-68828864c7cd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:18:00 crc kubenswrapper[5037]: I1126 15:18:00.436846 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2kfn\" (UniqueName: \"kubernetes.io/projected/69c81dd8-253e-4f54-bf2d-68828864c7cd-kube-api-access-f2kfn\") on node \"crc\" DevicePath \"\"" Nov 26 15:18:01 crc kubenswrapper[5037]: I1126 15:18:01.155448 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llzzn" event={"ID":"69c81dd8-253e-4f54-bf2d-68828864c7cd","Type":"ContainerDied","Data":"27127e334cfdcaaea7cbaf72a9b8e09ebf1e347caaedfcc1b012ec169e8b8113"} Nov 26 15:18:01 crc kubenswrapper[5037]: I1126 15:18:01.155525 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-llzzn" Nov 26 15:18:01 crc kubenswrapper[5037]: I1126 15:18:01.155556 5037 scope.go:117] "RemoveContainer" containerID="230334f503e4f60713b8c05154fd1c3ed5a236985f7f9e4b63e9dbcb1497f54d" Nov 26 15:18:01 crc kubenswrapper[5037]: I1126 15:18:01.191523 5037 scope.go:117] "RemoveContainer" containerID="8972b27a4518cd740d54064e07265937b8aa7371478931ce0e2302e5df5ac453" Nov 26 15:18:01 crc kubenswrapper[5037]: I1126 15:18:01.221078 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-llzzn"] Nov 26 15:18:01 crc kubenswrapper[5037]: I1126 15:18:01.235631 5037 scope.go:117] "RemoveContainer" containerID="6e5172b592ffdaf6e02adf11d68981307f8b764a6394cf80c9ccb5749dba0b2e" Nov 26 15:18:01 crc kubenswrapper[5037]: I1126 15:18:01.238685 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-llzzn"] Nov 26 15:18:01 crc kubenswrapper[5037]: I1126 15:18:01.927188 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69c81dd8-253e-4f54-bf2d-68828864c7cd" path="/var/lib/kubelet/pods/69c81dd8-253e-4f54-bf2d-68828864c7cd/volumes" Nov 26 15:18:04 crc kubenswrapper[5037]: I1126 15:18:04.908930 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f" Nov 26 15:18:04 crc kubenswrapper[5037]: E1126 15:18:04.909642 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:18:16 crc kubenswrapper[5037]: I1126 15:18:16.908819 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f" Nov 26 15:18:16 crc kubenswrapper[5037]: E1126 15:18:16.909980 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:18:30 crc kubenswrapper[5037]: I1126 15:18:30.909208 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f" Nov 26 15:18:30 crc kubenswrapper[5037]: E1126 15:18:30.910351 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:18:44 crc kubenswrapper[5037]: I1126 15:18:44.908388 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f" Nov 26 15:18:44 crc kubenswrapper[5037]: E1126 15:18:44.909274 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.189104 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-n4mcl"] Nov 26 15:18:51 crc kubenswrapper[5037]: E1126 15:18:51.190141 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c81dd8-253e-4f54-bf2d-68828864c7cd" containerName="registry-server" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.190161 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c81dd8-253e-4f54-bf2d-68828864c7cd" containerName="registry-server" Nov 26 15:18:51 crc kubenswrapper[5037]: E1126 15:18:51.190186 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c81dd8-253e-4f54-bf2d-68828864c7cd" containerName="extract-content" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.190198 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c81dd8-253e-4f54-bf2d-68828864c7cd" containerName="extract-content" Nov 26 15:18:51 crc kubenswrapper[5037]: E1126 15:18:51.190245 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c81dd8-253e-4f54-bf2d-68828864c7cd" containerName="extract-utilities" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.190258 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c81dd8-253e-4f54-bf2d-68828864c7cd" containerName="extract-utilities" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.190521 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c81dd8-253e-4f54-bf2d-68828864c7cd" containerName="registry-server" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.192217 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.214102 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n4mcl"] Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.330575 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-catalog-content\") pod \"certified-operators-n4mcl\" (UID: \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\") " pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.330651 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-utilities\") pod \"certified-operators-n4mcl\" (UID: \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\") " pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.330703 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grn82\" (UniqueName: \"kubernetes.io/projected/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-kube-api-access-grn82\") pod \"certified-operators-n4mcl\" (UID: \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\") " pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.432011 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-catalog-content\") pod \"certified-operators-n4mcl\" (UID: \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\") " pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.432082 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-utilities\") pod \"certified-operators-n4mcl\" (UID: \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\") " pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.432131 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grn82\" (UniqueName: \"kubernetes.io/projected/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-kube-api-access-grn82\") pod \"certified-operators-n4mcl\" (UID: \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\") " pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.432901 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-catalog-content\") pod \"certified-operators-n4mcl\" (UID: \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\") " pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.433004 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-utilities\") pod \"certified-operators-n4mcl\" (UID: \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\") " pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.467748 5037 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-grn82\" (UniqueName: \"kubernetes.io/projected/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-kube-api-access-grn82\") pod \"certified-operators-n4mcl\" (UID: \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\") " pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.529900 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:18:51 crc kubenswrapper[5037]: I1126 15:18:51.815976 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n4mcl"] Nov 26 15:18:52 crc kubenswrapper[5037]: I1126 15:18:52.647354 5037 generic.go:334] "Generic (PLEG): container finished" podID="07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" containerID="ab26f9e026ba420f8e2d4468b5fdae2bfddd2d237bc460fc5732bd6e1ec6b8ad" exitCode=0 Nov 26 15:18:52 crc kubenswrapper[5037]: I1126 15:18:52.647470 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4mcl" event={"ID":"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb","Type":"ContainerDied","Data":"ab26f9e026ba420f8e2d4468b5fdae2bfddd2d237bc460fc5732bd6e1ec6b8ad"} Nov 26 15:18:52 crc kubenswrapper[5037]: I1126 15:18:52.647783 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4mcl" event={"ID":"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb","Type":"ContainerStarted","Data":"ebb60235168b9523a79050e27a732a6d4318e634c451e6e1c35ed54e8acaccf9"} Nov 26 15:18:54 crc kubenswrapper[5037]: I1126 15:18:54.665720 5037 generic.go:334] "Generic (PLEG): container finished" podID="07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" containerID="309297757d47c549db1383a26f82bdebcc2a515db555a17737dcb009afec6967" exitCode=0 Nov 26 15:18:54 crc kubenswrapper[5037]: I1126 15:18:54.665854 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4mcl" event={"ID":"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb","Type":"ContainerDied","Data":"309297757d47c549db1383a26f82bdebcc2a515db555a17737dcb009afec6967"} Nov 26 15:18:56 crc kubenswrapper[5037]: I1126 15:18:56.687528 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4mcl" event={"ID":"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb","Type":"ContainerStarted","Data":"0d94e3175bf31516e80d02d5d7cf849230e16deb4cfaa7a69c7ea3a33df9eecc"} Nov 26 15:18:56 crc kubenswrapper[5037]: I1126 15:18:56.718721 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-n4mcl" podStartSLOduration=2.662105684 podStartE2EDuration="5.718699747s" podCreationTimestamp="2025-11-26 15:18:51 +0000 UTC" firstStartedPulling="2025-11-26 15:18:52.649790434 +0000 UTC m=+3799.446560648" lastFinishedPulling="2025-11-26 15:18:55.706384527 +0000 UTC m=+3802.503154711" observedRunningTime="2025-11-26 15:18:56.71759658 +0000 UTC m=+3803.514366804" watchObservedRunningTime="2025-11-26 15:18:56.718699747 +0000 UTC m=+3803.515469941" Nov 26 15:18:58 crc kubenswrapper[5037]: I1126 15:18:58.908824 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f" Nov 26 15:18:58 crc kubenswrapper[5037]: E1126 15:18:58.909584 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:19:01 crc kubenswrapper[5037]: I1126 15:19:01.530759 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:19:01 crc kubenswrapper[5037]: I1126 15:19:01.531444 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:19:01 crc kubenswrapper[5037]: I1126 15:19:01.613272 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:19:01 crc kubenswrapper[5037]: I1126 15:19:01.816560 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:19:01 crc kubenswrapper[5037]: I1126 15:19:01.884437 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n4mcl"] Nov 26 15:19:03 crc kubenswrapper[5037]: I1126 15:19:03.754768 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-n4mcl" podUID="07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" containerName="registry-server" containerID="cri-o://0d94e3175bf31516e80d02d5d7cf849230e16deb4cfaa7a69c7ea3a33df9eecc" gracePeriod=2 Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.227414 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.330191 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-catalog-content\") pod \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\" (UID: \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\") " Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.330274 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grn82\" (UniqueName: \"kubernetes.io/projected/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-kube-api-access-grn82\") pod \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\" (UID: \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\") " Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.330351 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-utilities\") pod \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\" (UID: \"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb\") " Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.331766 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-utilities" (OuterVolumeSpecName: "utilities") pod "07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" (UID: "07832ee7-cf1a-4a84-9de6-74a4fb8f93eb"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.344722 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-kube-api-access-grn82" (OuterVolumeSpecName: "kube-api-access-grn82") pod "07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" (UID: "07832ee7-cf1a-4a84-9de6-74a4fb8f93eb"). InnerVolumeSpecName "kube-api-access-grn82". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.383680 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" (UID: "07832ee7-cf1a-4a84-9de6-74a4fb8f93eb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.432042 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.432083 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grn82\" (UniqueName: \"kubernetes.io/projected/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-kube-api-access-grn82\") on node \"crc\" DevicePath \"\"" Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.432100 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.763423 5037 generic.go:334] "Generic (PLEG): container finished" podID="07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" containerID="0d94e3175bf31516e80d02d5d7cf849230e16deb4cfaa7a69c7ea3a33df9eecc" exitCode=0 Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.763492 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-n4mcl" Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.763501 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4mcl" event={"ID":"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb","Type":"ContainerDied","Data":"0d94e3175bf31516e80d02d5d7cf849230e16deb4cfaa7a69c7ea3a33df9eecc"} Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.763856 5037 scope.go:117] "RemoveContainer" containerID="0d94e3175bf31516e80d02d5d7cf849230e16deb4cfaa7a69c7ea3a33df9eecc" Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.763813 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4mcl" event={"ID":"07832ee7-cf1a-4a84-9de6-74a4fb8f93eb","Type":"ContainerDied","Data":"ebb60235168b9523a79050e27a732a6d4318e634c451e6e1c35ed54e8acaccf9"} Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.783973 5037 scope.go:117] "RemoveContainer" containerID="309297757d47c549db1383a26f82bdebcc2a515db555a17737dcb009afec6967" Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.803814 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n4mcl"] Nov 26 15:19:04 crc kubenswrapper[5037]: I1126 15:19:04.810822 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-n4mcl"] Nov 26 15:19:05 crc kubenswrapper[5037]: I1126 15:19:05.232034 5037 scope.go:117] "RemoveContainer" containerID="ab26f9e026ba420f8e2d4468b5fdae2bfddd2d237bc460fc5732bd6e1ec6b8ad" Nov 26 15:19:05 crc kubenswrapper[5037]: I1126 15:19:05.267170 5037 scope.go:117] "RemoveContainer" containerID="0d94e3175bf31516e80d02d5d7cf849230e16deb4cfaa7a69c7ea3a33df9eecc" Nov 26 15:19:05 crc kubenswrapper[5037]: E1126 15:19:05.267838 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d94e3175bf31516e80d02d5d7cf849230e16deb4cfaa7a69c7ea3a33df9eecc\": container with ID starting with 0d94e3175bf31516e80d02d5d7cf849230e16deb4cfaa7a69c7ea3a33df9eecc not found: ID does not exist" containerID="0d94e3175bf31516e80d02d5d7cf849230e16deb4cfaa7a69c7ea3a33df9eecc" Nov 26 15:19:05 crc kubenswrapper[5037]: I1126 15:19:05.267937 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d94e3175bf31516e80d02d5d7cf849230e16deb4cfaa7a69c7ea3a33df9eecc"} err="failed to get container status \"0d94e3175bf31516e80d02d5d7cf849230e16deb4cfaa7a69c7ea3a33df9eecc\": rpc error: code = NotFound desc = could not find container \"0d94e3175bf31516e80d02d5d7cf849230e16deb4cfaa7a69c7ea3a33df9eecc\": container with ID starting with 0d94e3175bf31516e80d02d5d7cf849230e16deb4cfaa7a69c7ea3a33df9eecc not found: ID does not exist" Nov 26 15:19:05 crc kubenswrapper[5037]: I1126 15:19:05.268016 5037 scope.go:117] "RemoveContainer" containerID="309297757d47c549db1383a26f82bdebcc2a515db555a17737dcb009afec6967" Nov 26 15:19:05 crc kubenswrapper[5037]: E1126 15:19:05.268900 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"309297757d47c549db1383a26f82bdebcc2a515db555a17737dcb009afec6967\": container with ID starting with 309297757d47c549db1383a26f82bdebcc2a515db555a17737dcb009afec6967 not found: ID does not exist" containerID="309297757d47c549db1383a26f82bdebcc2a515db555a17737dcb009afec6967" Nov 26 15:19:05 crc kubenswrapper[5037]: I1126 15:19:05.268976 5037 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"309297757d47c549db1383a26f82bdebcc2a515db555a17737dcb009afec6967"} err="failed to get container status \"309297757d47c549db1383a26f82bdebcc2a515db555a17737dcb009afec6967\": rpc error: code = NotFound desc = could not find container \"309297757d47c549db1383a26f82bdebcc2a515db555a17737dcb009afec6967\": container with ID starting with 309297757d47c549db1383a26f82bdebcc2a515db555a17737dcb009afec6967 not found: ID does not exist" Nov 26 15:19:05 crc kubenswrapper[5037]: I1126 15:19:05.269023 5037 scope.go:117] "RemoveContainer" containerID="ab26f9e026ba420f8e2d4468b5fdae2bfddd2d237bc460fc5732bd6e1ec6b8ad" Nov 26 15:19:05 crc kubenswrapper[5037]: E1126 15:19:05.269800 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab26f9e026ba420f8e2d4468b5fdae2bfddd2d237bc460fc5732bd6e1ec6b8ad\": container with ID starting with ab26f9e026ba420f8e2d4468b5fdae2bfddd2d237bc460fc5732bd6e1ec6b8ad not found: ID does not exist" containerID="ab26f9e026ba420f8e2d4468b5fdae2bfddd2d237bc460fc5732bd6e1ec6b8ad" Nov 26 15:19:05 crc kubenswrapper[5037]: I1126 15:19:05.269970 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab26f9e026ba420f8e2d4468b5fdae2bfddd2d237bc460fc5732bd6e1ec6b8ad"} err="failed to get container status \"ab26f9e026ba420f8e2d4468b5fdae2bfddd2d237bc460fc5732bd6e1ec6b8ad\": rpc error: code = NotFound desc = could not find container \"ab26f9e026ba420f8e2d4468b5fdae2bfddd2d237bc460fc5732bd6e1ec6b8ad\": container with ID starting with ab26f9e026ba420f8e2d4468b5fdae2bfddd2d237bc460fc5732bd6e1ec6b8ad not found: ID does not exist" Nov 26 15:19:05 crc kubenswrapper[5037]: I1126 15:19:05.926548 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" path="/var/lib/kubelet/pods/07832ee7-cf1a-4a84-9de6-74a4fb8f93eb/volumes" Nov 26 15:19:13 crc kubenswrapper[5037]: I1126 15:19:13.911132 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f" Nov 26 15:19:13 crc kubenswrapper[5037]: E1126 15:19:13.912718 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:19:24 crc kubenswrapper[5037]: I1126 15:19:24.908751 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f" Nov 26 15:19:24 crc kubenswrapper[5037]: E1126 15:19:24.909820 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:19:39 crc kubenswrapper[5037]: I1126 15:19:39.908592 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f" Nov 26 15:19:39 crc 
kubenswrapper[5037]: E1126 15:19:39.909671 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:19:50 crc kubenswrapper[5037]: I1126 15:19:50.908953 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:19:50 crc kubenswrapper[5037]: E1126 15:19:50.910612 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:20:01 crc kubenswrapper[5037]: I1126 15:20:01.908214 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:20:01 crc kubenswrapper[5037]: E1126 15:20:01.909792 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:20:15 crc kubenswrapper[5037]: I1126 15:20:15.908326 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:20:16 crc kubenswrapper[5037]: I1126 15:20:16.600325 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"f424ca1df5c395727791e89e1a2557f549d4a3c041db474114fd414689aca330"}
Nov 26 15:22:41 crc kubenswrapper[5037]: I1126 15:22:41.247672 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:22:41 crc kubenswrapper[5037]: I1126 15:22:41.248417 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:23:11 crc kubenswrapper[5037]: I1126 15:23:11.247663 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:23:11 crc kubenswrapper[5037]: I1126 15:23:11.248431 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:23:41 crc kubenswrapper[5037]: I1126 15:23:41.246989 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:23:41 crc kubenswrapper[5037]: I1126 15:23:41.247585 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:23:41 crc kubenswrapper[5037]: I1126 15:23:41.247640 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d"
Nov 26 15:23:41 crc kubenswrapper[5037]: I1126 15:23:41.248480 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f424ca1df5c395727791e89e1a2557f549d4a3c041db474114fd414689aca330"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 15:23:41 crc kubenswrapper[5037]: I1126 15:23:41.248570 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://f424ca1df5c395727791e89e1a2557f549d4a3c041db474114fd414689aca330" gracePeriod=600
Nov 26 15:23:41 crc kubenswrapper[5037]: I1126 15:23:41.533470 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="f424ca1df5c395727791e89e1a2557f549d4a3c041db474114fd414689aca330" exitCode=0
Nov 26 15:23:41 crc kubenswrapper[5037]: I1126 15:23:41.533520 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"f424ca1df5c395727791e89e1a2557f549d4a3c041db474114fd414689aca330"}
Nov 26 15:23:41 crc kubenswrapper[5037]: I1126 15:23:41.533557 5037 scope.go:117] "RemoveContainer" containerID="42296f169d08c107878aa61c500043474fe4185d69e7bce0567b40289de11c2f"
Nov 26 15:23:42 crc kubenswrapper[5037]: I1126 15:23:42.548465 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11"}
Nov 26 15:25:41 crc kubenswrapper[5037]: I1126 15:25:41.247806 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:25:41 crc kubenswrapper[5037]: I1126 15:25:41.248647 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.247094 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.247589 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.774634 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ss8pc"]
Nov 26 15:26:11 crc kubenswrapper[5037]: E1126 15:26:11.775087 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" containerName="registry-server"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.775107 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" containerName="registry-server"
Nov 26 15:26:11 crc kubenswrapper[5037]: E1126 15:26:11.775140 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" containerName="extract-content"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.775152 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" containerName="extract-content"
Nov 26 15:26:11 crc kubenswrapper[5037]: E1126 15:26:11.775189 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" containerName="extract-utilities"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.775206 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" containerName="extract-utilities"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.775496 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="07832ee7-cf1a-4a84-9de6-74a4fb8f93eb" containerName="registry-server"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.777192 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.810170 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ss8pc"]
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.836706 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7xmq\" (UniqueName: \"kubernetes.io/projected/ec5ddb67-47a6-4e64-8218-d38c8d092932-kube-api-access-x7xmq\") pod \"redhat-operators-ss8pc\" (UID: \"ec5ddb67-47a6-4e64-8218-d38c8d092932\") " pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.836776 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec5ddb67-47a6-4e64-8218-d38c8d092932-utilities\") pod \"redhat-operators-ss8pc\" (UID: \"ec5ddb67-47a6-4e64-8218-d38c8d092932\") " pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.836830 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec5ddb67-47a6-4e64-8218-d38c8d092932-catalog-content\") pod \"redhat-operators-ss8pc\" (UID: \"ec5ddb67-47a6-4e64-8218-d38c8d092932\") " pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.938809 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7xmq\" (UniqueName: \"kubernetes.io/projected/ec5ddb67-47a6-4e64-8218-d38c8d092932-kube-api-access-x7xmq\") pod \"redhat-operators-ss8pc\" (UID: \"ec5ddb67-47a6-4e64-8218-d38c8d092932\") " pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.939169 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec5ddb67-47a6-4e64-8218-d38c8d092932-utilities\") pod \"redhat-operators-ss8pc\" (UID: \"ec5ddb67-47a6-4e64-8218-d38c8d092932\") " pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.939219 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec5ddb67-47a6-4e64-8218-d38c8d092932-catalog-content\") pod \"redhat-operators-ss8pc\" (UID: \"ec5ddb67-47a6-4e64-8218-d38c8d092932\") " pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.939753 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec5ddb67-47a6-4e64-8218-d38c8d092932-catalog-content\") pod \"redhat-operators-ss8pc\" (UID: \"ec5ddb67-47a6-4e64-8218-d38c8d092932\") " pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.940055 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec5ddb67-47a6-4e64-8218-d38c8d092932-utilities\") pod \"redhat-operators-ss8pc\" (UID: \"ec5ddb67-47a6-4e64-8218-d38c8d092932\") " pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:11 crc kubenswrapper[5037]: I1126 15:26:11.968404 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7xmq\" (UniqueName: \"kubernetes.io/projected/ec5ddb67-47a6-4e64-8218-d38c8d092932-kube-api-access-x7xmq\") pod \"redhat-operators-ss8pc\" (UID: \"ec5ddb67-47a6-4e64-8218-d38c8d092932\") " pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:12 crc kubenswrapper[5037]: I1126 15:26:12.108386 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:12 crc kubenswrapper[5037]: I1126 15:26:12.602633 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ss8pc"]
Nov 26 15:26:13 crc kubenswrapper[5037]: I1126 15:26:13.011066 5037 generic.go:334] "Generic (PLEG): container finished" podID="ec5ddb67-47a6-4e64-8218-d38c8d092932" containerID="d5bebbce03c7b8dab0f025da1ea4e122452008cae0ce90ee386b7ecb7e8b727c" exitCode=0
Nov 26 15:26:13 crc kubenswrapper[5037]: I1126 15:26:13.011128 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ss8pc" event={"ID":"ec5ddb67-47a6-4e64-8218-d38c8d092932","Type":"ContainerDied","Data":"d5bebbce03c7b8dab0f025da1ea4e122452008cae0ce90ee386b7ecb7e8b727c"}
Nov 26 15:26:13 crc kubenswrapper[5037]: I1126 15:26:13.011197 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ss8pc" event={"ID":"ec5ddb67-47a6-4e64-8218-d38c8d092932","Type":"ContainerStarted","Data":"70a08116f554e0926272d120bd519ffee7a39023a9646047bd5724224c67a3f8"}
Nov 26 15:26:13 crc kubenswrapper[5037]: I1126 15:26:13.013168 5037 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 15:26:15 crc kubenswrapper[5037]: I1126 15:26:15.024904 5037 generic.go:334] "Generic (PLEG): container finished" podID="ec5ddb67-47a6-4e64-8218-d38c8d092932" containerID="8a74f5bcef81b9fd1a9907128d0b2523e7edd89df8fd3ea294f4c440b9233aa5" exitCode=0
Nov 26 15:26:15 crc kubenswrapper[5037]: I1126 15:26:15.025117 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ss8pc" event={"ID":"ec5ddb67-47a6-4e64-8218-d38c8d092932","Type":"ContainerDied","Data":"8a74f5bcef81b9fd1a9907128d0b2523e7edd89df8fd3ea294f4c440b9233aa5"}
Nov 26 15:26:16 crc kubenswrapper[5037]: I1126 15:26:16.036054 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ss8pc" event={"ID":"ec5ddb67-47a6-4e64-8218-d38c8d092932","Type":"ContainerStarted","Data":"2e576f329fdf623b3443d3cc8a2890904ebefd3c5cb3b6840bbe1b83aa76ca3a"}
Nov 26 15:26:16 crc kubenswrapper[5037]: I1126 15:26:16.068348 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ss8pc" podStartSLOduration=2.467858981 podStartE2EDuration="5.068281652s" podCreationTimestamp="2025-11-26 15:26:11 +0000 UTC" firstStartedPulling="2025-11-26 15:26:13.012809727 +0000 UTC m=+4239.809579921" lastFinishedPulling="2025-11-26 15:26:15.613232368 +0000 UTC m=+4242.410002592" observedRunningTime="2025-11-26 15:26:16.065960246 +0000 UTC m=+4242.862730460" watchObservedRunningTime="2025-11-26 15:26:16.068281652 +0000 UTC m=+4242.865051876"
Nov 26 15:26:22 crc kubenswrapper[5037]: I1126 15:26:22.109535 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:22 crc kubenswrapper[5037]: I1126 15:26:22.110460 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:22 crc kubenswrapper[5037]: I1126 15:26:22.197581 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:23 crc kubenswrapper[5037]: I1126 15:26:23.146984 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:23 crc kubenswrapper[5037]: I1126 15:26:23.190311 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ss8pc"]
Nov 26 15:26:25 crc kubenswrapper[5037]: I1126 15:26:25.116827 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ss8pc" podUID="ec5ddb67-47a6-4e64-8218-d38c8d092932" containerName="registry-server" containerID="cri-o://2e576f329fdf623b3443d3cc8a2890904ebefd3c5cb3b6840bbe1b83aa76ca3a" gracePeriod=2
Nov 26 15:26:26 crc kubenswrapper[5037]: I1126 15:26:26.142605 5037 generic.go:334] "Generic (PLEG): container finished" podID="ec5ddb67-47a6-4e64-8218-d38c8d092932" containerID="2e576f329fdf623b3443d3cc8a2890904ebefd3c5cb3b6840bbe1b83aa76ca3a" exitCode=0
Nov 26 15:26:26 crc kubenswrapper[5037]: I1126 15:26:26.142979 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ss8pc" event={"ID":"ec5ddb67-47a6-4e64-8218-d38c8d092932","Type":"ContainerDied","Data":"2e576f329fdf623b3443d3cc8a2890904ebefd3c5cb3b6840bbe1b83aa76ca3a"}
Nov 26 15:26:26 crc kubenswrapper[5037]: I1126 15:26:26.192782 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:26 crc kubenswrapper[5037]: I1126 15:26:26.367399 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec5ddb67-47a6-4e64-8218-d38c8d092932-catalog-content\") pod \"ec5ddb67-47a6-4e64-8218-d38c8d092932\" (UID: \"ec5ddb67-47a6-4e64-8218-d38c8d092932\") "
Nov 26 15:26:26 crc kubenswrapper[5037]: I1126 15:26:26.367447 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec5ddb67-47a6-4e64-8218-d38c8d092932-utilities\") pod \"ec5ddb67-47a6-4e64-8218-d38c8d092932\" (UID: \"ec5ddb67-47a6-4e64-8218-d38c8d092932\") "
Nov 26 15:26:26 crc kubenswrapper[5037]: I1126 15:26:26.367473 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7xmq\" (UniqueName: \"kubernetes.io/projected/ec5ddb67-47a6-4e64-8218-d38c8d092932-kube-api-access-x7xmq\") pod \"ec5ddb67-47a6-4e64-8218-d38c8d092932\" (UID: \"ec5ddb67-47a6-4e64-8218-d38c8d092932\") "
Nov 26 15:26:26 crc kubenswrapper[5037]: I1126 15:26:26.368470 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec5ddb67-47a6-4e64-8218-d38c8d092932-utilities" (OuterVolumeSpecName: "utilities") pod "ec5ddb67-47a6-4e64-8218-d38c8d092932" (UID: "ec5ddb67-47a6-4e64-8218-d38c8d092932"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 15:26:26 crc kubenswrapper[5037]: I1126 15:26:26.380128 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec5ddb67-47a6-4e64-8218-d38c8d092932-kube-api-access-x7xmq" (OuterVolumeSpecName: "kube-api-access-x7xmq") pod "ec5ddb67-47a6-4e64-8218-d38c8d092932" (UID: "ec5ddb67-47a6-4e64-8218-d38c8d092932"). InnerVolumeSpecName "kube-api-access-x7xmq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 15:26:26 crc kubenswrapper[5037]: I1126 15:26:26.457819 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec5ddb67-47a6-4e64-8218-d38c8d092932-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ec5ddb67-47a6-4e64-8218-d38c8d092932" (UID: "ec5ddb67-47a6-4e64-8218-d38c8d092932"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 15:26:26 crc kubenswrapper[5037]: I1126 15:26:26.469050 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec5ddb67-47a6-4e64-8218-d38c8d092932-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 15:26:26 crc kubenswrapper[5037]: I1126 15:26:26.469085 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec5ddb67-47a6-4e64-8218-d38c8d092932-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 15:26:26 crc kubenswrapper[5037]: I1126 15:26:26.469095 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7xmq\" (UniqueName: \"kubernetes.io/projected/ec5ddb67-47a6-4e64-8218-d38c8d092932-kube-api-access-x7xmq\") on node \"crc\" DevicePath \"\""
Nov 26 15:26:27 crc kubenswrapper[5037]: I1126 15:26:27.160076 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ss8pc" event={"ID":"ec5ddb67-47a6-4e64-8218-d38c8d092932","Type":"ContainerDied","Data":"70a08116f554e0926272d120bd519ffee7a39023a9646047bd5724224c67a3f8"}
Nov 26 15:26:27 crc kubenswrapper[5037]: I1126 15:26:27.160258 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ss8pc"
Nov 26 15:26:27 crc kubenswrapper[5037]: I1126 15:26:27.160725 5037 scope.go:117] "RemoveContainer" containerID="2e576f329fdf623b3443d3cc8a2890904ebefd3c5cb3b6840bbe1b83aa76ca3a"
Nov 26 15:26:27 crc kubenswrapper[5037]: I1126 15:26:27.202114 5037 scope.go:117] "RemoveContainer" containerID="8a74f5bcef81b9fd1a9907128d0b2523e7edd89df8fd3ea294f4c440b9233aa5"
Nov 26 15:26:27 crc kubenswrapper[5037]: I1126 15:26:27.219260 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ss8pc"]
Nov 26 15:26:27 crc kubenswrapper[5037]: I1126 15:26:27.227246 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ss8pc"]
Nov 26 15:26:27 crc kubenswrapper[5037]: I1126 15:26:27.281369 5037 scope.go:117] "RemoveContainer" containerID="d5bebbce03c7b8dab0f025da1ea4e122452008cae0ce90ee386b7ecb7e8b727c"
Nov 26 15:26:27 crc kubenswrapper[5037]: I1126 15:26:27.921853 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec5ddb67-47a6-4e64-8218-d38c8d092932" path="/var/lib/kubelet/pods/ec5ddb67-47a6-4e64-8218-d38c8d092932/volumes"
Nov 26 15:26:41 crc kubenswrapper[5037]: I1126 15:26:41.247273 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 15:26:41 crc kubenswrapper[5037]: I1126 15:26:41.248131 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 15:26:41 crc kubenswrapper[5037]: I1126 15:26:41.248203 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d"
Nov 26 15:26:41 crc kubenswrapper[5037]: I1126 15:26:41.249218 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 15:26:41 crc kubenswrapper[5037]: I1126 15:26:41.249369 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" gracePeriod=600
Nov 26 15:26:41 crc kubenswrapper[5037]: E1126 15:26:41.395769 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:26:42 crc kubenswrapper[5037]: I1126 15:26:42.305794 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" exitCode=0
Nov 26 15:26:42 crc kubenswrapper[5037]: I1126 15:26:42.305844 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11"}
Nov 26 15:26:42 crc kubenswrapper[5037]: I1126 15:26:42.306164 5037 scope.go:117] "RemoveContainer" containerID="f424ca1df5c395727791e89e1a2557f549d4a3c041db474114fd414689aca330"
Nov 26 15:26:42 crc kubenswrapper[5037]: I1126 15:26:42.306941 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11"
Nov 26 15:26:42 crc kubenswrapper[5037]: E1126 15:26:42.307517 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:26:54 crc kubenswrapper[5037]: I1126 15:26:54.909219 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11"
Nov 26 15:26:54 crc kubenswrapper[5037]: E1126 15:26:54.910257 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:27:07 crc kubenswrapper[5037]: I1126 15:27:07.910437 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11"
Nov 26 15:27:07 crc kubenswrapper[5037]: E1126 15:27:07.911597 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:27:22 crc kubenswrapper[5037]: I1126 15:27:22.908456 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11"
Nov 26 15:27:22 crc kubenswrapper[5037]: E1126 15:27:22.909439 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:27:34 crc kubenswrapper[5037]: I1126 15:27:34.909045 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11"
Nov 26 15:27:34 crc kubenswrapper[5037]: E1126 15:27:34.910009 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:27:49 crc kubenswrapper[5037]: I1126 15:27:49.908153 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11"
Nov 26 15:27:49 crc kubenswrapper[5037]: E1126 15:27:49.909004 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.201030 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4stpx"]
Nov 26 15:27:55 crc kubenswrapper[5037]: E1126 15:27:55.201861 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec5ddb67-47a6-4e64-8218-d38c8d092932" containerName="registry-server"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.201875 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec5ddb67-47a6-4e64-8218-d38c8d092932" containerName="registry-server"
Nov 26 15:27:55 crc kubenswrapper[5037]: E1126 15:27:55.201893 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec5ddb67-47a6-4e64-8218-d38c8d092932" containerName="extract-content"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.201903 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec5ddb67-47a6-4e64-8218-d38c8d092932" containerName="extract-content"
Nov 26 15:27:55 crc kubenswrapper[5037]: E1126 15:27:55.201924 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec5ddb67-47a6-4e64-8218-d38c8d092932" containerName="extract-utilities"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.201932 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec5ddb67-47a6-4e64-8218-d38c8d092932" containerName="extract-utilities"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.202115 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec5ddb67-47a6-4e64-8218-d38c8d092932" containerName="registry-server"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.203326 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.226059 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4stpx"]
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.375045 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a45bd33-c650-4a7f-86b6-db3c4c16617d-catalog-content\") pod \"community-operators-4stpx\" (UID: \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\") " pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.375116 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a45bd33-c650-4a7f-86b6-db3c4c16617d-utilities\") pod \"community-operators-4stpx\" (UID: \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\") " pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.375197 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r575w\" (UniqueName: \"kubernetes.io/projected/1a45bd33-c650-4a7f-86b6-db3c4c16617d-kube-api-access-r575w\") pod \"community-operators-4stpx\" (UID: \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\") " pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.476221 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a45bd33-c650-4a7f-86b6-db3c4c16617d-catalog-content\") pod \"community-operators-4stpx\" (UID: \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\") " pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.476265 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a45bd33-c650-4a7f-86b6-db3c4c16617d-utilities\") pod \"community-operators-4stpx\" (UID: \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\") " pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.476312 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r575w\" (UniqueName: \"kubernetes.io/projected/1a45bd33-c650-4a7f-86b6-db3c4c16617d-kube-api-access-r575w\") pod \"community-operators-4stpx\" (UID: \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\") " pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.477363 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a45bd33-c650-4a7f-86b6-db3c4c16617d-utilities\") pod \"community-operators-4stpx\" (UID: \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\") " pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.477474 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a45bd33-c650-4a7f-86b6-db3c4c16617d-catalog-content\") pod \"community-operators-4stpx\" (UID: \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\") " pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.503336 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r575w\" (UniqueName: \"kubernetes.io/projected/1a45bd33-c650-4a7f-86b6-db3c4c16617d-kube-api-access-r575w\") pod \"community-operators-4stpx\" (UID: \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\") " pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:27:55 crc kubenswrapper[5037]: I1126 15:27:55.539964 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:27:56 crc kubenswrapper[5037]: I1126 15:27:56.041543 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4stpx"]
Nov 26 15:27:56 crc kubenswrapper[5037]: I1126 15:27:56.098931 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4stpx" event={"ID":"1a45bd33-c650-4a7f-86b6-db3c4c16617d","Type":"ContainerStarted","Data":"cb6b419d8a7c07e836c5be742be1c736323adccd810e7b9de0ea615368ff3c4c"}
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.111584 5037 generic.go:334] "Generic (PLEG): container finished" podID="1a45bd33-c650-4a7f-86b6-db3c4c16617d" containerID="cb4159f02ff04847ee7165002573ec440ef5a9db8f97dfcf2b3741a106d1c28e" exitCode=0
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.111954 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4stpx" event={"ID":"1a45bd33-c650-4a7f-86b6-db3c4c16617d","Type":"ContainerDied","Data":"cb4159f02ff04847ee7165002573ec440ef5a9db8f97dfcf2b3741a106d1c28e"}
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.403968 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8wzq6"]
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.406754 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.471940 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8wzq6"]
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.509372 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8kw4\" (UniqueName: \"kubernetes.io/projected/1c0416c3-8555-463e-a63a-e485e06aa3f9-kube-api-access-g8kw4\") pod \"redhat-marketplace-8wzq6\" (UID: \"1c0416c3-8555-463e-a63a-e485e06aa3f9\") " pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.509445 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c0416c3-8555-463e-a63a-e485e06aa3f9-utilities\") pod \"redhat-marketplace-8wzq6\" (UID: \"1c0416c3-8555-463e-a63a-e485e06aa3f9\") " pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.509568 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c0416c3-8555-463e-a63a-e485e06aa3f9-catalog-content\") pod \"redhat-marketplace-8wzq6\" (UID: \"1c0416c3-8555-463e-a63a-e485e06aa3f9\") " pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.611259 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c0416c3-8555-463e-a63a-e485e06aa3f9-catalog-content\") pod \"redhat-marketplace-8wzq6\" (UID: \"1c0416c3-8555-463e-a63a-e485e06aa3f9\") " pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.611406 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8kw4\" (UniqueName: \"kubernetes.io/projected/1c0416c3-8555-463e-a63a-e485e06aa3f9-kube-api-access-g8kw4\") pod \"redhat-marketplace-8wzq6\" (UID: \"1c0416c3-8555-463e-a63a-e485e06aa3f9\") " pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.611456 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c0416c3-8555-463e-a63a-e485e06aa3f9-utilities\") pod \"redhat-marketplace-8wzq6\" (UID: \"1c0416c3-8555-463e-a63a-e485e06aa3f9\") " pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.611851 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c0416c3-8555-463e-a63a-e485e06aa3f9-catalog-content\") pod \"redhat-marketplace-8wzq6\" (UID: \"1c0416c3-8555-463e-a63a-e485e06aa3f9\") " pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.612267 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c0416c3-8555-463e-a63a-e485e06aa3f9-utilities\") pod \"redhat-marketplace-8wzq6\" (UID: \"1c0416c3-8555-463e-a63a-e485e06aa3f9\") " pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.634459 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8kw4\" (UniqueName: \"kubernetes.io/projected/1c0416c3-8555-463e-a63a-e485e06aa3f9-kube-api-access-g8kw4\") pod \"redhat-marketplace-8wzq6\" (UID: \"1c0416c3-8555-463e-a63a-e485e06aa3f9\") " pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:27:57 crc kubenswrapper[5037]: I1126 15:27:57.732912 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:27:58 crc kubenswrapper[5037]: I1126 15:27:58.119550 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4stpx" event={"ID":"1a45bd33-c650-4a7f-86b6-db3c4c16617d","Type":"ContainerStarted","Data":"d3740b2227e93e4c8e03b21c5434a2441fd286abd3737fff4a73ba7286d578e6"}
Nov 26 15:27:58 crc kubenswrapper[5037]: I1126 15:27:58.298033 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8wzq6"]
Nov 26 15:27:58 crc kubenswrapper[5037]: W1126 15:27:58.335548 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c0416c3_8555_463e_a63a_e485e06aa3f9.slice/crio-7b0bc19d23293ea1757f971fe91e627864d316c87b94ccac2ea0b5be18ec1f91 WatchSource:0}: Error finding container 7b0bc19d23293ea1757f971fe91e627864d316c87b94ccac2ea0b5be18ec1f91: Status 404 returned error can't find the container with id 7b0bc19d23293ea1757f971fe91e627864d316c87b94ccac2ea0b5be18ec1f91
Nov 26 15:27:59 crc kubenswrapper[5037]: I1126 15:27:59.128330 5037 generic.go:334] "Generic (PLEG): container finished" podID="1c0416c3-8555-463e-a63a-e485e06aa3f9" containerID="0e0669e26f3e2fc7b217a99e03180f9ac6cfd3967182aa6e42698ab5096806e0" exitCode=0
Nov 26 15:27:59 crc kubenswrapper[5037]: I1126 15:27:59.128428 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8wzq6" event={"ID":"1c0416c3-8555-463e-a63a-e485e06aa3f9","Type":"ContainerDied","Data":"0e0669e26f3e2fc7b217a99e03180f9ac6cfd3967182aa6e42698ab5096806e0"}
Nov 26 15:27:59 crc kubenswrapper[5037]: I1126 15:27:59.128465 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8wzq6" event={"ID":"1c0416c3-8555-463e-a63a-e485e06aa3f9","Type":"ContainerStarted","Data":"7b0bc19d23293ea1757f971fe91e627864d316c87b94ccac2ea0b5be18ec1f91"}
Nov 26 15:27:59 crc kubenswrapper[5037]: I1126 15:27:59.130257 5037 generic.go:334] "Generic (PLEG): container finished" podID="1a45bd33-c650-4a7f-86b6-db3c4c16617d" containerID="d3740b2227e93e4c8e03b21c5434a2441fd286abd3737fff4a73ba7286d578e6" exitCode=0
Nov 26 15:27:59 crc kubenswrapper[5037]: I1126 15:27:59.130331 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4stpx" event={"ID":"1a45bd33-c650-4a7f-86b6-db3c4c16617d","Type":"ContainerDied","Data":"d3740b2227e93e4c8e03b21c5434a2441fd286abd3737fff4a73ba7286d578e6"}
Nov 26 15:28:00 crc kubenswrapper[5037]: I1126 15:28:00.140438 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8wzq6" event={"ID":"1c0416c3-8555-463e-a63a-e485e06aa3f9","Type":"ContainerStarted","Data":"017deaa989167047a100269bb052cb956325dd1db981c3a0acd3502d4804d52b"}
Nov 26 15:28:01 crc kubenswrapper[5037]: I1126 15:28:01.151016 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4stpx" event={"ID":"1a45bd33-c650-4a7f-86b6-db3c4c16617d","Type":"ContainerStarted","Data":"6297150fef78a4bedce12672f0f0e6db60ef7c60b2c79f232aa8a13e9e38ed20"}
Nov 26 15:28:01 crc kubenswrapper[5037]: I1126 15:28:01.153801 5037 generic.go:334] "Generic (PLEG): container finished" podID="1c0416c3-8555-463e-a63a-e485e06aa3f9" containerID="017deaa989167047a100269bb052cb956325dd1db981c3a0acd3502d4804d52b" exitCode=0
Nov 26 15:28:01 crc kubenswrapper[5037]: I1126 15:28:01.153861 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8wzq6" event={"ID":"1c0416c3-8555-463e-a63a-e485e06aa3f9","Type":"ContainerDied","Data":"017deaa989167047a100269bb052cb956325dd1db981c3a0acd3502d4804d52b"}
Nov 26 15:28:01 crc kubenswrapper[5037]: I1126 15:28:01.175349 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4stpx" podStartSLOduration=3.315297241 podStartE2EDuration="6.175320399s" podCreationTimestamp="2025-11-26 15:27:55 +0000 UTC" firstStartedPulling="2025-11-26 15:27:57.114964801 +0000 UTC m=+4343.911735025" lastFinishedPulling="2025-11-26 15:27:59.974987989 +0000 UTC m=+4346.771758183" observedRunningTime="2025-11-26 15:28:01.170738829 +0000 UTC m=+4347.967509073" watchObservedRunningTime="2025-11-26 15:28:01.175320399 +0000 UTC m=+4347.972090623"
Nov 26 15:28:02 crc kubenswrapper[5037]: I1126 15:28:02.175329 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8wzq6" event={"ID":"1c0416c3-8555-463e-a63a-e485e06aa3f9","Type":"ContainerStarted","Data":"3d0f714da2570725d7f460bce8480c8d2ae683d21bb7b55a401f95572af59856"}
Nov 26 15:28:02 crc kubenswrapper[5037]: I1126 15:28:02.202407 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8wzq6" podStartSLOduration=2.603900757 podStartE2EDuration="5.202391652s" podCreationTimestamp="2025-11-26 15:27:57 +0000 UTC" firstStartedPulling="2025-11-26 15:27:59.132672436 +0000 UTC m=+4345.929442630" lastFinishedPulling="2025-11-26 15:28:01.731163301 +0000 UTC m=+4348.527933525" observedRunningTime="2025-11-26 15:28:02.199832082 +0000 UTC m=+4348.996602286" watchObservedRunningTime="2025-11-26 15:28:02.202391652 +0000 UTC m=+4348.999161836"
Nov 26 15:28:04 crc kubenswrapper[5037]: I1126 15:28:04.908701 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11"
Nov 26 15:28:04 crc kubenswrapper[5037]: E1126 15:28:04.909476 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:28:05 crc kubenswrapper[5037]: I1126 15:28:05.541305 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:28:05 crc kubenswrapper[5037]: I1126 15:28:05.541364 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:28:05 crc kubenswrapper[5037]: I1126 15:28:05.599229 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:28:06 crc kubenswrapper[5037]: I1126 15:28:06.261832 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:28:06 crc kubenswrapper[5037]: I1126 15:28:06.987654 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4stpx"]
Nov 26 15:28:07 crc kubenswrapper[5037]: I1126 15:28:07.734306 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:28:07 crc kubenswrapper[5037]: I1126 15:28:07.735534 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:28:07 crc kubenswrapper[5037]: I1126 15:28:07.799601 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:28:08 crc kubenswrapper[5037]: I1126 15:28:08.223323 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4stpx" podUID="1a45bd33-c650-4a7f-86b6-db3c4c16617d" containerName="registry-server" containerID="cri-o://6297150fef78a4bedce12672f0f0e6db60ef7c60b2c79f232aa8a13e9e38ed20" gracePeriod=2
Nov 26 15:28:08 crc kubenswrapper[5037]: I1126 15:28:08.299792 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:28:08 crc kubenswrapper[5037]: I1126 15:28:08.890613 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.004141 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a45bd33-c650-4a7f-86b6-db3c4c16617d-catalog-content\") pod \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\" (UID: \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\") "
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.004421 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r575w\" (UniqueName: \"kubernetes.io/projected/1a45bd33-c650-4a7f-86b6-db3c4c16617d-kube-api-access-r575w\") pod \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\" (UID: \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\") "
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.004489 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a45bd33-c650-4a7f-86b6-db3c4c16617d-utilities\") pod \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\" (UID: \"1a45bd33-c650-4a7f-86b6-db3c4c16617d\") "
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.005900 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a45bd33-c650-4a7f-86b6-db3c4c16617d-utilities" (OuterVolumeSpecName: "utilities") pod "1a45bd33-c650-4a7f-86b6-db3c4c16617d" (UID: "1a45bd33-c650-4a7f-86b6-db3c4c16617d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.010856 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a45bd33-c650-4a7f-86b6-db3c4c16617d-kube-api-access-r575w" (OuterVolumeSpecName: "kube-api-access-r575w") pod "1a45bd33-c650-4a7f-86b6-db3c4c16617d" (UID: "1a45bd33-c650-4a7f-86b6-db3c4c16617d"). InnerVolumeSpecName "kube-api-access-r575w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.107673 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r575w\" (UniqueName: \"kubernetes.io/projected/1a45bd33-c650-4a7f-86b6-db3c4c16617d-kube-api-access-r575w\") on node \"crc\" DevicePath \"\""
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.107726 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a45bd33-c650-4a7f-86b6-db3c4c16617d-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.218538 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a45bd33-c650-4a7f-86b6-db3c4c16617d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1a45bd33-c650-4a7f-86b6-db3c4c16617d" (UID: "1a45bd33-c650-4a7f-86b6-db3c4c16617d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.235859 5037 generic.go:334] "Generic (PLEG): container finished" podID="1a45bd33-c650-4a7f-86b6-db3c4c16617d" containerID="6297150fef78a4bedce12672f0f0e6db60ef7c60b2c79f232aa8a13e9e38ed20" exitCode=0
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.236523 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4stpx" event={"ID":"1a45bd33-c650-4a7f-86b6-db3c4c16617d","Type":"ContainerDied","Data":"6297150fef78a4bedce12672f0f0e6db60ef7c60b2c79f232aa8a13e9e38ed20"}
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.236599 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4stpx" event={"ID":"1a45bd33-c650-4a7f-86b6-db3c4c16617d","Type":"ContainerDied","Data":"cb6b419d8a7c07e836c5be742be1c736323adccd810e7b9de0ea615368ff3c4c"}
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.236636 5037 scope.go:117] "RemoveContainer" containerID="6297150fef78a4bedce12672f0f0e6db60ef7c60b2c79f232aa8a13e9e38ed20"
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.236666 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4stpx"
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.270498 5037 scope.go:117] "RemoveContainer" containerID="d3740b2227e93e4c8e03b21c5434a2441fd286abd3737fff4a73ba7286d578e6"
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.293753 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4stpx"]
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.300308 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4stpx"]
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.311117 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a45bd33-c650-4a7f-86b6-db3c4c16617d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.320051 5037 scope.go:117] "RemoveContainer" containerID="cb4159f02ff04847ee7165002573ec440ef5a9db8f97dfcf2b3741a106d1c28e"
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.343613 5037 scope.go:117] "RemoveContainer" containerID="6297150fef78a4bedce12672f0f0e6db60ef7c60b2c79f232aa8a13e9e38ed20"
Nov 26 15:28:09 crc kubenswrapper[5037]: E1126 15:28:09.345717 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6297150fef78a4bedce12672f0f0e6db60ef7c60b2c79f232aa8a13e9e38ed20\": container with ID starting with 6297150fef78a4bedce12672f0f0e6db60ef7c60b2c79f232aa8a13e9e38ed20 not found: ID does not exist" containerID="6297150fef78a4bedce12672f0f0e6db60ef7c60b2c79f232aa8a13e9e38ed20"
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.345802 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6297150fef78a4bedce12672f0f0e6db60ef7c60b2c79f232aa8a13e9e38ed20"} err="failed to get container status \"6297150fef78a4bedce12672f0f0e6db60ef7c60b2c79f232aa8a13e9e38ed20\": rpc error: code = NotFound desc = could not find container \"6297150fef78a4bedce12672f0f0e6db60ef7c60b2c79f232aa8a13e9e38ed20\": container with ID starting with 6297150fef78a4bedce12672f0f0e6db60ef7c60b2c79f232aa8a13e9e38ed20 not found: ID does not exist"
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.345850 5037 scope.go:117] "RemoveContainer" containerID="d3740b2227e93e4c8e03b21c5434a2441fd286abd3737fff4a73ba7286d578e6"
Nov 26 15:28:09 crc kubenswrapper[5037]: E1126 15:28:09.346435 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3740b2227e93e4c8e03b21c5434a2441fd286abd3737fff4a73ba7286d578e6\": container with ID starting with d3740b2227e93e4c8e03b21c5434a2441fd286abd3737fff4a73ba7286d578e6 not found: ID does not exist" containerID="d3740b2227e93e4c8e03b21c5434a2441fd286abd3737fff4a73ba7286d578e6"
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.346500 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3740b2227e93e4c8e03b21c5434a2441fd286abd3737fff4a73ba7286d578e6"} err="failed to get container status \"d3740b2227e93e4c8e03b21c5434a2441fd286abd3737fff4a73ba7286d578e6\": rpc error: code = NotFound desc = could not find container \"d3740b2227e93e4c8e03b21c5434a2441fd286abd3737fff4a73ba7286d578e6\": container with ID starting with d3740b2227e93e4c8e03b21c5434a2441fd286abd3737fff4a73ba7286d578e6 not found: ID does not exist"
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.346538 5037 scope.go:117] "RemoveContainer" containerID="cb4159f02ff04847ee7165002573ec440ef5a9db8f97dfcf2b3741a106d1c28e"
Nov 26 15:28:09 crc kubenswrapper[5037]: E1126 15:28:09.346853 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb4159f02ff04847ee7165002573ec440ef5a9db8f97dfcf2b3741a106d1c28e\": container with ID starting with cb4159f02ff04847ee7165002573ec440ef5a9db8f97dfcf2b3741a106d1c28e not found: ID does not exist" containerID="cb4159f02ff04847ee7165002573ec440ef5a9db8f97dfcf2b3741a106d1c28e"
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.346906 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb4159f02ff04847ee7165002573ec440ef5a9db8f97dfcf2b3741a106d1c28e"} err="failed to get container status \"cb4159f02ff04847ee7165002573ec440ef5a9db8f97dfcf2b3741a106d1c28e\": rpc error: code = NotFound desc = could not find container \"cb4159f02ff04847ee7165002573ec440ef5a9db8f97dfcf2b3741a106d1c28e\": container with ID starting with cb4159f02ff04847ee7165002573ec440ef5a9db8f97dfcf2b3741a106d1c28e not found: ID does not exist"
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.790985 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8wzq6"]
Nov 26 15:28:09 crc kubenswrapper[5037]: I1126 15:28:09.921956 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a45bd33-c650-4a7f-86b6-db3c4c16617d" path="/var/lib/kubelet/pods/1a45bd33-c650-4a7f-86b6-db3c4c16617d/volumes"
Nov 26 15:28:11 crc kubenswrapper[5037]: I1126 15:28:11.256114 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8wzq6" podUID="1c0416c3-8555-463e-a63a-e485e06aa3f9" containerName="registry-server" containerID="cri-o://3d0f714da2570725d7f460bce8480c8d2ae683d21bb7b55a401f95572af59856" gracePeriod=2
Nov 26 15:28:11 crc kubenswrapper[5037]: I1126 15:28:11.673834 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:28:11 crc kubenswrapper[5037]: I1126 15:28:11.848912 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c0416c3-8555-463e-a63a-e485e06aa3f9-utilities\") pod \"1c0416c3-8555-463e-a63a-e485e06aa3f9\" (UID: \"1c0416c3-8555-463e-a63a-e485e06aa3f9\") "
Nov 26 15:28:11 crc kubenswrapper[5037]: I1126 15:28:11.849045 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c0416c3-8555-463e-a63a-e485e06aa3f9-catalog-content\") pod \"1c0416c3-8555-463e-a63a-e485e06aa3f9\" (UID: \"1c0416c3-8555-463e-a63a-e485e06aa3f9\") "
Nov 26 15:28:11 crc kubenswrapper[5037]: I1126 15:28:11.849097 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8kw4\" (UniqueName: \"kubernetes.io/projected/1c0416c3-8555-463e-a63a-e485e06aa3f9-kube-api-access-g8kw4\") pod \"1c0416c3-8555-463e-a63a-e485e06aa3f9\" (UID: \"1c0416c3-8555-463e-a63a-e485e06aa3f9\") "
Nov 26 15:28:11 crc kubenswrapper[5037]: I1126 15:28:11.849813 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c0416c3-8555-463e-a63a-e485e06aa3f9-utilities" (OuterVolumeSpecName: "utilities") pod "1c0416c3-8555-463e-a63a-e485e06aa3f9" (UID: "1c0416c3-8555-463e-a63a-e485e06aa3f9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 15:28:11 crc kubenswrapper[5037]: I1126 15:28:11.856576 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c0416c3-8555-463e-a63a-e485e06aa3f9-kube-api-access-g8kw4" (OuterVolumeSpecName: "kube-api-access-g8kw4") pod "1c0416c3-8555-463e-a63a-e485e06aa3f9" (UID: "1c0416c3-8555-463e-a63a-e485e06aa3f9"). InnerVolumeSpecName "kube-api-access-g8kw4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 15:28:11 crc kubenswrapper[5037]: I1126 15:28:11.865331 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c0416c3-8555-463e-a63a-e485e06aa3f9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1c0416c3-8555-463e-a63a-e485e06aa3f9" (UID: "1c0416c3-8555-463e-a63a-e485e06aa3f9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 15:28:11 crc kubenswrapper[5037]: I1126 15:28:11.951058 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c0416c3-8555-463e-a63a-e485e06aa3f9-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 15:28:11 crc kubenswrapper[5037]: I1126 15:28:11.951262 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8kw4\" (UniqueName: \"kubernetes.io/projected/1c0416c3-8555-463e-a63a-e485e06aa3f9-kube-api-access-g8kw4\") on node \"crc\" DevicePath \"\""
Nov 26 15:28:11 crc kubenswrapper[5037]: I1126 15:28:11.951353 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c0416c3-8555-463e-a63a-e485e06aa3f9-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.271251 5037 generic.go:334] "Generic (PLEG): container finished" podID="1c0416c3-8555-463e-a63a-e485e06aa3f9" containerID="3d0f714da2570725d7f460bce8480c8d2ae683d21bb7b55a401f95572af59856" exitCode=0
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.271359 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8wzq6" event={"ID":"1c0416c3-8555-463e-a63a-e485e06aa3f9","Type":"ContainerDied","Data":"3d0f714da2570725d7f460bce8480c8d2ae683d21bb7b55a401f95572af59856"}
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.271417 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8wzq6" event={"ID":"1c0416c3-8555-463e-a63a-e485e06aa3f9","Type":"ContainerDied","Data":"7b0bc19d23293ea1757f971fe91e627864d316c87b94ccac2ea0b5be18ec1f91"}
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.271448 5037 scope.go:117] "RemoveContainer" containerID="3d0f714da2570725d7f460bce8480c8d2ae683d21bb7b55a401f95572af59856"
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.271645 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8wzq6"
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.313025 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8wzq6"]
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.316855 5037 scope.go:117] "RemoveContainer" containerID="017deaa989167047a100269bb052cb956325dd1db981c3a0acd3502d4804d52b"
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.322968 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8wzq6"]
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.350134 5037 scope.go:117] "RemoveContainer" containerID="0e0669e26f3e2fc7b217a99e03180f9ac6cfd3967182aa6e42698ab5096806e0"
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.390700 5037 scope.go:117] "RemoveContainer" containerID="3d0f714da2570725d7f460bce8480c8d2ae683d21bb7b55a401f95572af59856"
Nov 26 15:28:12 crc kubenswrapper[5037]: E1126 15:28:12.391343 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d0f714da2570725d7f460bce8480c8d2ae683d21bb7b55a401f95572af59856\": container with ID starting with 3d0f714da2570725d7f460bce8480c8d2ae683d21bb7b55a401f95572af59856 not found: ID does not exist" containerID="3d0f714da2570725d7f460bce8480c8d2ae683d21bb7b55a401f95572af59856"
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.391374 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d0f714da2570725d7f460bce8480c8d2ae683d21bb7b55a401f95572af59856"} err="failed to get container status \"3d0f714da2570725d7f460bce8480c8d2ae683d21bb7b55a401f95572af59856\": rpc error: code = NotFound desc = could not find container \"3d0f714da2570725d7f460bce8480c8d2ae683d21bb7b55a401f95572af59856\": container with ID starting with 3d0f714da2570725d7f460bce8480c8d2ae683d21bb7b55a401f95572af59856 not found: ID does not exist"
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.391394 5037 scope.go:117] "RemoveContainer" containerID="017deaa989167047a100269bb052cb956325dd1db981c3a0acd3502d4804d52b"
Nov 26 15:28:12 crc kubenswrapper[5037]: E1126 15:28:12.391887 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"017deaa989167047a100269bb052cb956325dd1db981c3a0acd3502d4804d52b\": container with ID starting with 017deaa989167047a100269bb052cb956325dd1db981c3a0acd3502d4804d52b not found: ID does not exist" containerID="017deaa989167047a100269bb052cb956325dd1db981c3a0acd3502d4804d52b"
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.391924 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"017deaa989167047a100269bb052cb956325dd1db981c3a0acd3502d4804d52b"} err="failed to get container status \"017deaa989167047a100269bb052cb956325dd1db981c3a0acd3502d4804d52b\": rpc error: code = NotFound desc = could not find container \"017deaa989167047a100269bb052cb956325dd1db981c3a0acd3502d4804d52b\": container with ID starting with 017deaa989167047a100269bb052cb956325dd1db981c3a0acd3502d4804d52b not found: ID does not exist"
Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.391951 5037 scope.go:117] "RemoveContainer" containerID="0e0669e26f3e2fc7b217a99e03180f9ac6cfd3967182aa6e42698ab5096806e0"
Nov 26 15:28:12 crc kubenswrapper[5037]: E1126 15:28:12.392214 5037 log.go:32] "ContainerStatus from runtime service
failed" err="rpc error: code = NotFound desc = could not find container \"0e0669e26f3e2fc7b217a99e03180f9ac6cfd3967182aa6e42698ab5096806e0\": container with ID starting with 0e0669e26f3e2fc7b217a99e03180f9ac6cfd3967182aa6e42698ab5096806e0 not found: ID does not exist" containerID="0e0669e26f3e2fc7b217a99e03180f9ac6cfd3967182aa6e42698ab5096806e0" Nov 26 15:28:12 crc kubenswrapper[5037]: I1126 15:28:12.392232 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e0669e26f3e2fc7b217a99e03180f9ac6cfd3967182aa6e42698ab5096806e0"} err="failed to get container status \"0e0669e26f3e2fc7b217a99e03180f9ac6cfd3967182aa6e42698ab5096806e0\": rpc error: code = NotFound desc = could not find container \"0e0669e26f3e2fc7b217a99e03180f9ac6cfd3967182aa6e42698ab5096806e0\": container with ID starting with 0e0669e26f3e2fc7b217a99e03180f9ac6cfd3967182aa6e42698ab5096806e0 not found: ID does not exist" Nov 26 15:28:13 crc kubenswrapper[5037]: I1126 15:28:13.924982 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c0416c3-8555-463e-a63a-e485e06aa3f9" path="/var/lib/kubelet/pods/1c0416c3-8555-463e-a63a-e485e06aa3f9/volumes" Nov 26 15:28:17 crc kubenswrapper[5037]: I1126 15:28:17.910472 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:28:17 crc kubenswrapper[5037]: E1126 15:28:17.911722 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:28:29 crc kubenswrapper[5037]: I1126 15:28:29.907750 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:28:29 crc kubenswrapper[5037]: E1126 15:28:29.908465 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:28:41 crc kubenswrapper[5037]: I1126 15:28:41.909189 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:28:41 crc kubenswrapper[5037]: E1126 15:28:41.910472 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.301088 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c49ch"] Nov 26 15:28:56 crc kubenswrapper[5037]: E1126 15:28:56.303082 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c0416c3-8555-463e-a63a-e485e06aa3f9" 
containerName="extract-content" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.303187 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c0416c3-8555-463e-a63a-e485e06aa3f9" containerName="extract-content" Nov 26 15:28:56 crc kubenswrapper[5037]: E1126 15:28:56.303273 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a45bd33-c650-4a7f-86b6-db3c4c16617d" containerName="extract-content" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.303371 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a45bd33-c650-4a7f-86b6-db3c4c16617d" containerName="extract-content" Nov 26 15:28:56 crc kubenswrapper[5037]: E1126 15:28:56.303448 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c0416c3-8555-463e-a63a-e485e06aa3f9" containerName="extract-utilities" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.303522 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c0416c3-8555-463e-a63a-e485e06aa3f9" containerName="extract-utilities" Nov 26 15:28:56 crc kubenswrapper[5037]: E1126 15:28:56.303607 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a45bd33-c650-4a7f-86b6-db3c4c16617d" containerName="extract-utilities" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.303678 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a45bd33-c650-4a7f-86b6-db3c4c16617d" containerName="extract-utilities" Nov 26 15:28:56 crc kubenswrapper[5037]: E1126 15:28:56.303742 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c0416c3-8555-463e-a63a-e485e06aa3f9" containerName="registry-server" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.303799 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c0416c3-8555-463e-a63a-e485e06aa3f9" containerName="registry-server" Nov 26 15:28:56 crc kubenswrapper[5037]: E1126 15:28:56.303884 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a45bd33-c650-4a7f-86b6-db3c4c16617d" containerName="registry-server" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.303954 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a45bd33-c650-4a7f-86b6-db3c4c16617d" containerName="registry-server" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.304242 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c0416c3-8555-463e-a63a-e485e06aa3f9" containerName="registry-server" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.304338 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a45bd33-c650-4a7f-86b6-db3c4c16617d" containerName="registry-server" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.305386 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.317331 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c49ch"] Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.500159 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-catalog-content\") pod \"certified-operators-c49ch\" (UID: \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\") " pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.500256 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-utilities\") pod \"certified-operators-c49ch\" (UID: \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\") " pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.500277 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fww5q\" (UniqueName: \"kubernetes.io/projected/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-kube-api-access-fww5q\") pod \"certified-operators-c49ch\" (UID: \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\") " pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.601936 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-utilities\") pod \"certified-operators-c49ch\" (UID: \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\") " pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.601995 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fww5q\" (UniqueName: \"kubernetes.io/projected/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-kube-api-access-fww5q\") pod \"certified-operators-c49ch\" (UID: \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\") " pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.602064 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-catalog-content\") pod \"certified-operators-c49ch\" (UID: \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\") " pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.602465 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-utilities\") pod \"certified-operators-c49ch\" (UID: \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\") " pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.602597 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-catalog-content\") pod \"certified-operators-c49ch\" (UID: \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\") " pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.636151 5037 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-fww5q\" (UniqueName: \"kubernetes.io/projected/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-kube-api-access-fww5q\") pod \"certified-operators-c49ch\" (UID: \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\") " pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.908154 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:28:56 crc kubenswrapper[5037]: E1126 15:28:56.908736 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:28:56 crc kubenswrapper[5037]: I1126 15:28:56.931885 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:28:57 crc kubenswrapper[5037]: I1126 15:28:57.402391 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c49ch"] Nov 26 15:28:58 crc kubenswrapper[5037]: I1126 15:28:58.079082 5037 generic.go:334] "Generic (PLEG): container finished" podID="0c4c4d45-8f87-4800-ad51-4fbd5cc49469" containerID="50d60b81f067a8e2388450cbc6a4e709053d25701ac31ba6226d6fa496e27a88" exitCode=0 Nov 26 15:28:58 crc kubenswrapper[5037]: I1126 15:28:58.079124 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c49ch" event={"ID":"0c4c4d45-8f87-4800-ad51-4fbd5cc49469","Type":"ContainerDied","Data":"50d60b81f067a8e2388450cbc6a4e709053d25701ac31ba6226d6fa496e27a88"} Nov 26 15:28:58 crc kubenswrapper[5037]: I1126 15:28:58.079150 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c49ch" event={"ID":"0c4c4d45-8f87-4800-ad51-4fbd5cc49469","Type":"ContainerStarted","Data":"d1cc3115e3c73b722bb4e5c43ab6548e9b85b244882e809ad67f005981985a10"} Nov 26 15:29:00 crc kubenswrapper[5037]: I1126 15:29:00.099123 5037 generic.go:334] "Generic (PLEG): container finished" podID="0c4c4d45-8f87-4800-ad51-4fbd5cc49469" containerID="0832f4ab67ec64446b8ef5d741789f9d24c4577bbdd3bd6401afb86e22fd12a0" exitCode=0 Nov 26 15:29:00 crc kubenswrapper[5037]: I1126 15:29:00.099213 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c49ch" event={"ID":"0c4c4d45-8f87-4800-ad51-4fbd5cc49469","Type":"ContainerDied","Data":"0832f4ab67ec64446b8ef5d741789f9d24c4577bbdd3bd6401afb86e22fd12a0"} Nov 26 15:29:01 crc kubenswrapper[5037]: I1126 15:29:01.112094 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c49ch" event={"ID":"0c4c4d45-8f87-4800-ad51-4fbd5cc49469","Type":"ContainerStarted","Data":"d39964f326b6ebc681d9455e4e8aee945e8af7a28eded5bfe2b3a4ebd300c8cf"} Nov 26 15:29:01 crc kubenswrapper[5037]: I1126 15:29:01.150251 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c49ch" podStartSLOduration=2.454796189 podStartE2EDuration="5.150211619s" podCreationTimestamp="2025-11-26 15:28:56 +0000 UTC" firstStartedPulling="2025-11-26 15:28:58.083552725 +0000 UTC m=+4404.880322949" 
lastFinishedPulling="2025-11-26 15:29:00.778968165 +0000 UTC m=+4407.575738379" observedRunningTime="2025-11-26 15:29:01.144515503 +0000 UTC m=+4407.941285697" watchObservedRunningTime="2025-11-26 15:29:01.150211619 +0000 UTC m=+4407.946981823" Nov 26 15:29:06 crc kubenswrapper[5037]: I1126 15:29:06.933052 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:29:06 crc kubenswrapper[5037]: I1126 15:29:06.933735 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:29:06 crc kubenswrapper[5037]: I1126 15:29:06.992892 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:29:07 crc kubenswrapper[5037]: I1126 15:29:07.221302 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:29:07 crc kubenswrapper[5037]: I1126 15:29:07.287737 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c49ch"] Nov 26 15:29:07 crc kubenswrapper[5037]: I1126 15:29:07.908238 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:29:07 crc kubenswrapper[5037]: E1126 15:29:07.909408 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:29:09 crc kubenswrapper[5037]: I1126 15:29:09.184480 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c49ch" podUID="0c4c4d45-8f87-4800-ad51-4fbd5cc49469" containerName="registry-server" containerID="cri-o://d39964f326b6ebc681d9455e4e8aee945e8af7a28eded5bfe2b3a4ebd300c8cf" gracePeriod=2 Nov 26 15:29:09 crc kubenswrapper[5037]: I1126 15:29:09.603084 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:29:09 crc kubenswrapper[5037]: I1126 15:29:09.708607 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-utilities\") pod \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\" (UID: \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\") " Nov 26 15:29:09 crc kubenswrapper[5037]: I1126 15:29:09.708769 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-catalog-content\") pod \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\" (UID: \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\") " Nov 26 15:29:09 crc kubenswrapper[5037]: I1126 15:29:09.708830 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fww5q\" (UniqueName: \"kubernetes.io/projected/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-kube-api-access-fww5q\") pod \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\" (UID: \"0c4c4d45-8f87-4800-ad51-4fbd5cc49469\") " Nov 26 15:29:09 crc kubenswrapper[5037]: I1126 15:29:09.709734 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-utilities" (OuterVolumeSpecName: "utilities") pod "0c4c4d45-8f87-4800-ad51-4fbd5cc49469" (UID: "0c4c4d45-8f87-4800-ad51-4fbd5cc49469"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:29:09 crc kubenswrapper[5037]: I1126 15:29:09.725570 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-kube-api-access-fww5q" (OuterVolumeSpecName: "kube-api-access-fww5q") pod "0c4c4d45-8f87-4800-ad51-4fbd5cc49469" (UID: "0c4c4d45-8f87-4800-ad51-4fbd5cc49469"). InnerVolumeSpecName "kube-api-access-fww5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:29:09 crc kubenswrapper[5037]: I1126 15:29:09.811845 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fww5q\" (UniqueName: \"kubernetes.io/projected/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-kube-api-access-fww5q\") on node \"crc\" DevicePath \"\"" Nov 26 15:29:09 crc kubenswrapper[5037]: I1126 15:29:09.812228 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:29:09 crc kubenswrapper[5037]: I1126 15:29:09.814068 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0c4c4d45-8f87-4800-ad51-4fbd5cc49469" (UID: "0c4c4d45-8f87-4800-ad51-4fbd5cc49469"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:29:09 crc kubenswrapper[5037]: I1126 15:29:09.913543 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c4c4d45-8f87-4800-ad51-4fbd5cc49469-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.196203 5037 generic.go:334] "Generic (PLEG): container finished" podID="0c4c4d45-8f87-4800-ad51-4fbd5cc49469" containerID="d39964f326b6ebc681d9455e4e8aee945e8af7a28eded5bfe2b3a4ebd300c8cf" exitCode=0 Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.196264 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c49ch" event={"ID":"0c4c4d45-8f87-4800-ad51-4fbd5cc49469","Type":"ContainerDied","Data":"d39964f326b6ebc681d9455e4e8aee945e8af7a28eded5bfe2b3a4ebd300c8cf"} Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.196387 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c49ch" Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.197368 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c49ch" event={"ID":"0c4c4d45-8f87-4800-ad51-4fbd5cc49469","Type":"ContainerDied","Data":"d1cc3115e3c73b722bb4e5c43ab6548e9b85b244882e809ad67f005981985a10"} Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.197515 5037 scope.go:117] "RemoveContainer" containerID="d39964f326b6ebc681d9455e4e8aee945e8af7a28eded5bfe2b3a4ebd300c8cf" Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.230375 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c49ch"] Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.253798 5037 scope.go:117] "RemoveContainer" containerID="0832f4ab67ec64446b8ef5d741789f9d24c4577bbdd3bd6401afb86e22fd12a0" Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.257270 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c49ch"] Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.279970 5037 scope.go:117] "RemoveContainer" containerID="50d60b81f067a8e2388450cbc6a4e709053d25701ac31ba6226d6fa496e27a88" Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.321615 5037 scope.go:117] "RemoveContainer" containerID="d39964f326b6ebc681d9455e4e8aee945e8af7a28eded5bfe2b3a4ebd300c8cf" Nov 26 15:29:10 crc kubenswrapper[5037]: E1126 15:29:10.322333 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d39964f326b6ebc681d9455e4e8aee945e8af7a28eded5bfe2b3a4ebd300c8cf\": container with ID starting with d39964f326b6ebc681d9455e4e8aee945e8af7a28eded5bfe2b3a4ebd300c8cf not found: ID does not exist" containerID="d39964f326b6ebc681d9455e4e8aee945e8af7a28eded5bfe2b3a4ebd300c8cf" Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.322420 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d39964f326b6ebc681d9455e4e8aee945e8af7a28eded5bfe2b3a4ebd300c8cf"} err="failed to get container status \"d39964f326b6ebc681d9455e4e8aee945e8af7a28eded5bfe2b3a4ebd300c8cf\": rpc error: code = NotFound desc = could not find container \"d39964f326b6ebc681d9455e4e8aee945e8af7a28eded5bfe2b3a4ebd300c8cf\": container with ID starting with d39964f326b6ebc681d9455e4e8aee945e8af7a28eded5bfe2b3a4ebd300c8cf not found: ID does not exist" Nov 26 
15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.322479 5037 scope.go:117] "RemoveContainer" containerID="0832f4ab67ec64446b8ef5d741789f9d24c4577bbdd3bd6401afb86e22fd12a0" Nov 26 15:29:10 crc kubenswrapper[5037]: E1126 15:29:10.323231 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0832f4ab67ec64446b8ef5d741789f9d24c4577bbdd3bd6401afb86e22fd12a0\": container with ID starting with 0832f4ab67ec64446b8ef5d741789f9d24c4577bbdd3bd6401afb86e22fd12a0 not found: ID does not exist" containerID="0832f4ab67ec64446b8ef5d741789f9d24c4577bbdd3bd6401afb86e22fd12a0" Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.323335 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0832f4ab67ec64446b8ef5d741789f9d24c4577bbdd3bd6401afb86e22fd12a0"} err="failed to get container status \"0832f4ab67ec64446b8ef5d741789f9d24c4577bbdd3bd6401afb86e22fd12a0\": rpc error: code = NotFound desc = could not find container \"0832f4ab67ec64446b8ef5d741789f9d24c4577bbdd3bd6401afb86e22fd12a0\": container with ID starting with 0832f4ab67ec64446b8ef5d741789f9d24c4577bbdd3bd6401afb86e22fd12a0 not found: ID does not exist" Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.323382 5037 scope.go:117] "RemoveContainer" containerID="50d60b81f067a8e2388450cbc6a4e709053d25701ac31ba6226d6fa496e27a88" Nov 26 15:29:10 crc kubenswrapper[5037]: E1126 15:29:10.323952 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50d60b81f067a8e2388450cbc6a4e709053d25701ac31ba6226d6fa496e27a88\": container with ID starting with 50d60b81f067a8e2388450cbc6a4e709053d25701ac31ba6226d6fa496e27a88 not found: ID does not exist" containerID="50d60b81f067a8e2388450cbc6a4e709053d25701ac31ba6226d6fa496e27a88" Nov 26 15:29:10 crc kubenswrapper[5037]: I1126 15:29:10.324002 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50d60b81f067a8e2388450cbc6a4e709053d25701ac31ba6226d6fa496e27a88"} err="failed to get container status \"50d60b81f067a8e2388450cbc6a4e709053d25701ac31ba6226d6fa496e27a88\": rpc error: code = NotFound desc = could not find container \"50d60b81f067a8e2388450cbc6a4e709053d25701ac31ba6226d6fa496e27a88\": container with ID starting with 50d60b81f067a8e2388450cbc6a4e709053d25701ac31ba6226d6fa496e27a88 not found: ID does not exist" Nov 26 15:29:11 crc kubenswrapper[5037]: I1126 15:29:11.918832 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c4c4d45-8f87-4800-ad51-4fbd5cc49469" path="/var/lib/kubelet/pods/0c4c4d45-8f87-4800-ad51-4fbd5cc49469/volumes" Nov 26 15:29:21 crc kubenswrapper[5037]: I1126 15:29:21.909258 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:29:21 crc kubenswrapper[5037]: E1126 15:29:21.911366 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:29:35 crc kubenswrapper[5037]: I1126 15:29:35.908565 5037 scope.go:117] "RemoveContainer" 
containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:29:35 crc kubenswrapper[5037]: E1126 15:29:35.909647 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:29:47 crc kubenswrapper[5037]: I1126 15:29:47.909149 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:29:47 crc kubenswrapper[5037]: E1126 15:29:47.910601 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.212522 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs"] Nov 26 15:30:00 crc kubenswrapper[5037]: E1126 15:30:00.213476 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c4c4d45-8f87-4800-ad51-4fbd5cc49469" containerName="registry-server" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.213493 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c4c4d45-8f87-4800-ad51-4fbd5cc49469" containerName="registry-server" Nov 26 15:30:00 crc kubenswrapper[5037]: E1126 15:30:00.213520 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c4c4d45-8f87-4800-ad51-4fbd5cc49469" containerName="extract-utilities" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.213528 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c4c4d45-8f87-4800-ad51-4fbd5cc49469" containerName="extract-utilities" Nov 26 15:30:00 crc kubenswrapper[5037]: E1126 15:30:00.213539 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c4c4d45-8f87-4800-ad51-4fbd5cc49469" containerName="extract-content" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.213548 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c4c4d45-8f87-4800-ad51-4fbd5cc49469" containerName="extract-content" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.213738 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c4c4d45-8f87-4800-ad51-4fbd5cc49469" containerName="registry-server" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.214264 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.217554 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.217852 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.228193 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs"] Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.396629 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/40c6a13f-23b9-4605-aed6-957352361ebb-config-volume\") pod \"collect-profiles-29402850-2lhfs\" (UID: \"40c6a13f-23b9-4605-aed6-957352361ebb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.396753 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2fn5\" (UniqueName: \"kubernetes.io/projected/40c6a13f-23b9-4605-aed6-957352361ebb-kube-api-access-r2fn5\") pod \"collect-profiles-29402850-2lhfs\" (UID: \"40c6a13f-23b9-4605-aed6-957352361ebb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.396937 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/40c6a13f-23b9-4605-aed6-957352361ebb-secret-volume\") pod \"collect-profiles-29402850-2lhfs\" (UID: \"40c6a13f-23b9-4605-aed6-957352361ebb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.499049 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/40c6a13f-23b9-4605-aed6-957352361ebb-config-volume\") pod \"collect-profiles-29402850-2lhfs\" (UID: \"40c6a13f-23b9-4605-aed6-957352361ebb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.499212 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2fn5\" (UniqueName: \"kubernetes.io/projected/40c6a13f-23b9-4605-aed6-957352361ebb-kube-api-access-r2fn5\") pod \"collect-profiles-29402850-2lhfs\" (UID: \"40c6a13f-23b9-4605-aed6-957352361ebb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.499257 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/40c6a13f-23b9-4605-aed6-957352361ebb-secret-volume\") pod \"collect-profiles-29402850-2lhfs\" (UID: \"40c6a13f-23b9-4605-aed6-957352361ebb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.501109 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/40c6a13f-23b9-4605-aed6-957352361ebb-config-volume\") pod 
\"collect-profiles-29402850-2lhfs\" (UID: \"40c6a13f-23b9-4605-aed6-957352361ebb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.509497 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/40c6a13f-23b9-4605-aed6-957352361ebb-secret-volume\") pod \"collect-profiles-29402850-2lhfs\" (UID: \"40c6a13f-23b9-4605-aed6-957352361ebb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.529478 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2fn5\" (UniqueName: \"kubernetes.io/projected/40c6a13f-23b9-4605-aed6-957352361ebb-kube-api-access-r2fn5\") pod \"collect-profiles-29402850-2lhfs\" (UID: \"40c6a13f-23b9-4605-aed6-957352361ebb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.553220 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" Nov 26 15:30:00 crc kubenswrapper[5037]: I1126 15:30:00.854612 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs"] Nov 26 15:30:01 crc kubenswrapper[5037]: I1126 15:30:01.084345 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" event={"ID":"40c6a13f-23b9-4605-aed6-957352361ebb","Type":"ContainerStarted","Data":"57f990296398b2203a923e383c7452bdcb95746c11550b4cfc5c2585ec30c549"} Nov 26 15:30:01 crc kubenswrapper[5037]: I1126 15:30:01.084464 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" event={"ID":"40c6a13f-23b9-4605-aed6-957352361ebb","Type":"ContainerStarted","Data":"780fa67802e7b5fa206f1a56111c1e7aca8bf01858e07490c73b831059a0fd2b"} Nov 26 15:30:01 crc kubenswrapper[5037]: I1126 15:30:01.111963 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" podStartSLOduration=1.111931541 podStartE2EDuration="1.111931541s" podCreationTimestamp="2025-11-26 15:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:30:01.103997072 +0000 UTC m=+4467.900767326" watchObservedRunningTime="2025-11-26 15:30:01.111931541 +0000 UTC m=+4467.908701775" Nov 26 15:30:01 crc kubenswrapper[5037]: I1126 15:30:01.908661 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:30:01 crc kubenswrapper[5037]: E1126 15:30:01.909035 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:30:03 crc kubenswrapper[5037]: I1126 15:30:03.105260 5037 generic.go:334] "Generic (PLEG): container finished" podID="40c6a13f-23b9-4605-aed6-957352361ebb" 
containerID="57f990296398b2203a923e383c7452bdcb95746c11550b4cfc5c2585ec30c549" exitCode=0 Nov 26 15:30:03 crc kubenswrapper[5037]: I1126 15:30:03.105364 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" event={"ID":"40c6a13f-23b9-4605-aed6-957352361ebb","Type":"ContainerDied","Data":"57f990296398b2203a923e383c7452bdcb95746c11550b4cfc5c2585ec30c549"} Nov 26 15:30:04 crc kubenswrapper[5037]: I1126 15:30:04.428910 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" Nov 26 15:30:04 crc kubenswrapper[5037]: I1126 15:30:04.571083 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/40c6a13f-23b9-4605-aed6-957352361ebb-secret-volume\") pod \"40c6a13f-23b9-4605-aed6-957352361ebb\" (UID: \"40c6a13f-23b9-4605-aed6-957352361ebb\") " Nov 26 15:30:04 crc kubenswrapper[5037]: I1126 15:30:04.571163 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2fn5\" (UniqueName: \"kubernetes.io/projected/40c6a13f-23b9-4605-aed6-957352361ebb-kube-api-access-r2fn5\") pod \"40c6a13f-23b9-4605-aed6-957352361ebb\" (UID: \"40c6a13f-23b9-4605-aed6-957352361ebb\") " Nov 26 15:30:04 crc kubenswrapper[5037]: I1126 15:30:04.571220 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/40c6a13f-23b9-4605-aed6-957352361ebb-config-volume\") pod \"40c6a13f-23b9-4605-aed6-957352361ebb\" (UID: \"40c6a13f-23b9-4605-aed6-957352361ebb\") " Nov 26 15:30:04 crc kubenswrapper[5037]: I1126 15:30:04.572419 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40c6a13f-23b9-4605-aed6-957352361ebb-config-volume" (OuterVolumeSpecName: "config-volume") pod "40c6a13f-23b9-4605-aed6-957352361ebb" (UID: "40c6a13f-23b9-4605-aed6-957352361ebb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:30:04 crc kubenswrapper[5037]: I1126 15:30:04.584503 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40c6a13f-23b9-4605-aed6-957352361ebb-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "40c6a13f-23b9-4605-aed6-957352361ebb" (UID: "40c6a13f-23b9-4605-aed6-957352361ebb"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:30:04 crc kubenswrapper[5037]: I1126 15:30:04.584619 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40c6a13f-23b9-4605-aed6-957352361ebb-kube-api-access-r2fn5" (OuterVolumeSpecName: "kube-api-access-r2fn5") pod "40c6a13f-23b9-4605-aed6-957352361ebb" (UID: "40c6a13f-23b9-4605-aed6-957352361ebb"). InnerVolumeSpecName "kube-api-access-r2fn5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:30:04 crc kubenswrapper[5037]: I1126 15:30:04.672923 5037 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/40c6a13f-23b9-4605-aed6-957352361ebb-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:04 crc kubenswrapper[5037]: I1126 15:30:04.673276 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2fn5\" (UniqueName: \"kubernetes.io/projected/40c6a13f-23b9-4605-aed6-957352361ebb-kube-api-access-r2fn5\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:04 crc kubenswrapper[5037]: I1126 15:30:04.673321 5037 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/40c6a13f-23b9-4605-aed6-957352361ebb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:05 crc kubenswrapper[5037]: I1126 15:30:05.131595 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" event={"ID":"40c6a13f-23b9-4605-aed6-957352361ebb","Type":"ContainerDied","Data":"780fa67802e7b5fa206f1a56111c1e7aca8bf01858e07490c73b831059a0fd2b"} Nov 26 15:30:05 crc kubenswrapper[5037]: I1126 15:30:05.131641 5037 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="780fa67802e7b5fa206f1a56111c1e7aca8bf01858e07490c73b831059a0fd2b" Nov 26 15:30:05 crc kubenswrapper[5037]: I1126 15:30:05.131690 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-2lhfs" Nov 26 15:30:05 crc kubenswrapper[5037]: I1126 15:30:05.223168 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs"] Nov 26 15:30:05 crc kubenswrapper[5037]: I1126 15:30:05.231071 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402805-5cbhs"] Nov 26 15:30:05 crc kubenswrapper[5037]: I1126 15:30:05.926714 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5f8a89b-07b9-4a28-bca3-8f5564634feb" path="/var/lib/kubelet/pods/d5f8a89b-07b9-4a28-bca3-8f5564634feb/volumes" Nov 26 15:30:14 crc kubenswrapper[5037]: I1126 15:30:14.909182 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:30:14 crc kubenswrapper[5037]: E1126 15:30:14.910034 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:30:27 crc kubenswrapper[5037]: I1126 15:30:27.908637 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:30:27 crc kubenswrapper[5037]: E1126 15:30:27.909602 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:30:42 crc kubenswrapper[5037]: I1126 15:30:42.908730 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:30:42 crc kubenswrapper[5037]: E1126 15:30:42.909473 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:30:51 crc kubenswrapper[5037]: I1126 15:30:51.336608 5037 scope.go:117] "RemoveContainer" containerID="a4dbd91b679db67c424df575ea21fe86ee8a6694de9ee93ff35fcc4822988a82" Nov 26 15:30:54 crc kubenswrapper[5037]: I1126 15:30:54.908960 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:30:54 crc kubenswrapper[5037]: E1126 15:30:54.911083 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:31:06 crc kubenswrapper[5037]: I1126 15:31:06.908784 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:31:06 crc kubenswrapper[5037]: E1126 15:31:06.909348 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:31:21 crc kubenswrapper[5037]: I1126 15:31:21.907956 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:31:21 crc kubenswrapper[5037]: E1126 15:31:21.908772 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:31:34 crc kubenswrapper[5037]: I1126 15:31:34.908612 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:31:34 crc kubenswrapper[5037]: E1126 15:31:34.909841 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:31:49 crc kubenswrapper[5037]: I1126 15:31:49.908080 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:31:50 crc kubenswrapper[5037]: I1126 15:31:50.158202 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"3eb0af632cc19ef89f31373c09fe518114a27d0fe325b09569e3a3babb449aa7"} Nov 26 15:34:11 crc kubenswrapper[5037]: I1126 15:34:11.247894 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:34:11 crc kubenswrapper[5037]: I1126 15:34:11.248467 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:34:41 crc kubenswrapper[5037]: I1126 15:34:41.247199 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:34:41 crc kubenswrapper[5037]: I1126 15:34:41.248222 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:35:11 crc kubenswrapper[5037]: I1126 15:35:11.247008 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:35:11 crc kubenswrapper[5037]: I1126 15:35:11.247714 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:35:11 crc kubenswrapper[5037]: I1126 15:35:11.247769 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 15:35:11 crc kubenswrapper[5037]: I1126 15:35:11.248349 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3eb0af632cc19ef89f31373c09fe518114a27d0fe325b09569e3a3babb449aa7"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 15:35:11 crc kubenswrapper[5037]: 
I1126 15:35:11.248406 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://3eb0af632cc19ef89f31373c09fe518114a27d0fe325b09569e3a3babb449aa7" gracePeriod=600 Nov 26 15:35:11 crc kubenswrapper[5037]: I1126 15:35:11.749279 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="3eb0af632cc19ef89f31373c09fe518114a27d0fe325b09569e3a3babb449aa7" exitCode=0 Nov 26 15:35:11 crc kubenswrapper[5037]: I1126 15:35:11.749357 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"3eb0af632cc19ef89f31373c09fe518114a27d0fe325b09569e3a3babb449aa7"} Nov 26 15:35:11 crc kubenswrapper[5037]: I1126 15:35:11.749632 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerStarted","Data":"2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67"} Nov 26 15:35:11 crc kubenswrapper[5037]: I1126 15:35:11.749658 5037 scope.go:117] "RemoveContainer" containerID="05e40e9234bad18844d5cc74a70d5e8fe2909fa15220761260c02d3d4b8e4c11" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.572903 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-hkdw9/must-gather-7cgw2"] Nov 26 15:35:28 crc kubenswrapper[5037]: E1126 15:35:28.573732 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40c6a13f-23b9-4605-aed6-957352361ebb" containerName="collect-profiles" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.573744 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="40c6a13f-23b9-4605-aed6-957352361ebb" containerName="collect-profiles" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.573889 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="40c6a13f-23b9-4605-aed6-957352361ebb" containerName="collect-profiles" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.574666 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-hkdw9/must-gather-7cgw2" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.576420 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-hkdw9"/"openshift-service-ca.crt" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.578025 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-hkdw9/must-gather-7cgw2"] Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.578071 5037 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-hkdw9"/"default-dockercfg-96sqc" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.578137 5037 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-hkdw9"/"kube-root-ca.crt" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.700986 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rkdt\" (UniqueName: \"kubernetes.io/projected/c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb-kube-api-access-8rkdt\") pod \"must-gather-7cgw2\" (UID: \"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb\") " pod="openshift-must-gather-hkdw9/must-gather-7cgw2" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.701050 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb-must-gather-output\") pod \"must-gather-7cgw2\" (UID: \"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb\") " pod="openshift-must-gather-hkdw9/must-gather-7cgw2" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.802128 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rkdt\" (UniqueName: \"kubernetes.io/projected/c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb-kube-api-access-8rkdt\") pod \"must-gather-7cgw2\" (UID: \"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb\") " pod="openshift-must-gather-hkdw9/must-gather-7cgw2" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.802174 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb-must-gather-output\") pod \"must-gather-7cgw2\" (UID: \"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb\") " pod="openshift-must-gather-hkdw9/must-gather-7cgw2" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.802527 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb-must-gather-output\") pod \"must-gather-7cgw2\" (UID: \"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb\") " pod="openshift-must-gather-hkdw9/must-gather-7cgw2" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.826085 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rkdt\" (UniqueName: \"kubernetes.io/projected/c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb-kube-api-access-8rkdt\") pod \"must-gather-7cgw2\" (UID: \"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb\") " pod="openshift-must-gather-hkdw9/must-gather-7cgw2" Nov 26 15:35:28 crc kubenswrapper[5037]: I1126 15:35:28.889639 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-hkdw9/must-gather-7cgw2" Nov 26 15:35:29 crc kubenswrapper[5037]: I1126 15:35:29.379625 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-hkdw9/must-gather-7cgw2"] Nov 26 15:35:29 crc kubenswrapper[5037]: I1126 15:35:29.392843 5037 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 15:35:29 crc kubenswrapper[5037]: I1126 15:35:29.924413 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hkdw9/must-gather-7cgw2" event={"ID":"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb","Type":"ContainerStarted","Data":"1db5bac066f7c5491b79191fe1c9d6ecf763d7ce97cf54a18dc67006d8fb89a8"} Nov 26 15:35:33 crc kubenswrapper[5037]: I1126 15:35:33.964446 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hkdw9/must-gather-7cgw2" event={"ID":"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb","Type":"ContainerStarted","Data":"b288b37e7c8d0186cff1f91bd08dd2d8eb96e434c641609c6e59d6605d155070"} Nov 26 15:35:34 crc kubenswrapper[5037]: I1126 15:35:34.973138 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hkdw9/must-gather-7cgw2" event={"ID":"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb","Type":"ContainerStarted","Data":"3ded05fe92fd30e6936ae87cf15c3189b785d8810ebabaddd0719f8310a3538c"} Nov 26 15:35:34 crc kubenswrapper[5037]: I1126 15:35:34.994470 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-hkdw9/must-gather-7cgw2" podStartSLOduration=2.79524677 podStartE2EDuration="6.994455013s" podCreationTimestamp="2025-11-26 15:35:28 +0000 UTC" firstStartedPulling="2025-11-26 15:35:29.392584569 +0000 UTC m=+4796.189354763" lastFinishedPulling="2025-11-26 15:35:33.591792822 +0000 UTC m=+4800.388563006" observedRunningTime="2025-11-26 15:35:34.988893039 +0000 UTC m=+4801.785663233" watchObservedRunningTime="2025-11-26 15:35:34.994455013 +0000 UTC m=+4801.791225187" Nov 26 15:36:35 crc kubenswrapper[5037]: I1126 15:36:35.371714 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26_d69cc4af-4483-4634-a1f1-b15253c7d42c/util/0.log" Nov 26 15:36:35 crc kubenswrapper[5037]: I1126 15:36:35.472628 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26_d69cc4af-4483-4634-a1f1-b15253c7d42c/util/0.log" Nov 26 15:36:35 crc kubenswrapper[5037]: I1126 15:36:35.488621 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26_d69cc4af-4483-4634-a1f1-b15253c7d42c/pull/0.log" Nov 26 15:36:35 crc kubenswrapper[5037]: I1126 15:36:35.542663 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26_d69cc4af-4483-4634-a1f1-b15253c7d42c/pull/0.log" Nov 26 15:36:35 crc kubenswrapper[5037]: I1126 15:36:35.744762 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26_d69cc4af-4483-4634-a1f1-b15253c7d42c/pull/0.log" Nov 26 15:36:35 crc kubenswrapper[5037]: I1126 15:36:35.785221 5037 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26_d69cc4af-4483-4634-a1f1-b15253c7d42c/extract/0.log" Nov 26 15:36:35 crc kubenswrapper[5037]: I1126 15:36:35.797633 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5aqnw26_d69cc4af-4483-4634-a1f1-b15253c7d42c/util/0.log" Nov 26 15:36:35 crc kubenswrapper[5037]: I1126 15:36:35.909112 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-7qg65_dad0150e-fc25-4245-ad22-e940fadd107a/kube-rbac-proxy/0.log" Nov 26 15:36:36 crc kubenswrapper[5037]: I1126 15:36:36.022447 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-7qg65_dad0150e-fc25-4245-ad22-e940fadd107a/manager/0.log" Nov 26 15:36:36 crc kubenswrapper[5037]: I1126 15:36:36.025885 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-ndntx_4d00a1ec-3ee8-4166-b497-e96629f2e92a/kube-rbac-proxy/0.log" Nov 26 15:36:36 crc kubenswrapper[5037]: I1126 15:36:36.159944 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-ndntx_4d00a1ec-3ee8-4166-b497-e96629f2e92a/manager/0.log" Nov 26 15:36:36 crc kubenswrapper[5037]: I1126 15:36:36.215395 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-k67q7_4fbefccf-1879-4d21-a312-44f95a16545b/manager/0.log" Nov 26 15:36:36 crc kubenswrapper[5037]: I1126 15:36:36.227732 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-k67q7_4fbefccf-1879-4d21-a312-44f95a16545b/kube-rbac-proxy/0.log" Nov 26 15:36:36 crc kubenswrapper[5037]: I1126 15:36:36.381091 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-s25dn_06c51319-7e28-41b4-be90-8262eb3b7307/kube-rbac-proxy/0.log" Nov 26 15:36:36 crc kubenswrapper[5037]: I1126 15:36:36.588026 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-s25dn_06c51319-7e28-41b4-be90-8262eb3b7307/manager/0.log" Nov 26 15:36:36 crc kubenswrapper[5037]: I1126 15:36:36.602446 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-927td_aa8c9234-d8b0-4975-b4c4-83496196179f/kube-rbac-proxy/0.log" Nov 26 15:36:36 crc kubenswrapper[5037]: I1126 15:36:36.724797 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-927td_aa8c9234-d8b0-4975-b4c4-83496196179f/manager/0.log" Nov 26 15:36:36 crc kubenswrapper[5037]: I1126 15:36:36.756222 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-7szzf_668635d7-22b8-4fa0-8762-4b3c802cf9cb/manager/0.log" Nov 26 15:36:36 crc kubenswrapper[5037]: I1126 15:36:36.789160 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-7szzf_668635d7-22b8-4fa0-8762-4b3c802cf9cb/kube-rbac-proxy/0.log" Nov 26 15:36:36 crc kubenswrapper[5037]: I1126 15:36:36.982550 5037 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-vdw9h_98bbf7c3-bf20-4131-8df2-55af39d6c756/kube-rbac-proxy/0.log" Nov 26 15:36:37 crc kubenswrapper[5037]: I1126 15:36:37.136548 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-vdw9h_98bbf7c3-bf20-4131-8df2-55af39d6c756/manager/0.log" Nov 26 15:36:37 crc kubenswrapper[5037]: I1126 15:36:37.171502 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-8hkwd_2d64e096-3666-4924-b2c3-31584884abb1/kube-rbac-proxy/0.log" Nov 26 15:36:37 crc kubenswrapper[5037]: I1126 15:36:37.215145 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-8hkwd_2d64e096-3666-4924-b2c3-31584884abb1/manager/0.log" Nov 26 15:36:37 crc kubenswrapper[5037]: I1126 15:36:37.326972 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-bgknz_8e942820-209d-40b6-bd79-1836b7af00bb/kube-rbac-proxy/0.log" Nov 26 15:36:37 crc kubenswrapper[5037]: I1126 15:36:37.386415 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-bgknz_8e942820-209d-40b6-bd79-1836b7af00bb/manager/0.log" Nov 26 15:36:37 crc kubenswrapper[5037]: I1126 15:36:37.424710 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-q6n7b_3982528b-3a86-43af-a0af-2f0ddd71e349/kube-rbac-proxy/0.log" Nov 26 15:36:37 crc kubenswrapper[5037]: I1126 15:36:37.485662 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-q6n7b_3982528b-3a86-43af-a0af-2f0ddd71e349/manager/0.log" Nov 26 15:36:37 crc kubenswrapper[5037]: I1126 15:36:37.638359 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-z7cq5_8da78b02-ca91-4fca-8710-875bfdd6e6a9/manager/0.log" Nov 26 15:36:37 crc kubenswrapper[5037]: I1126 15:36:37.640753 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-z7cq5_8da78b02-ca91-4fca-8710-875bfdd6e6a9/kube-rbac-proxy/0.log" Nov 26 15:36:37 crc kubenswrapper[5037]: I1126 15:36:37.735607 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-shv5p_4acfd23c-4a99-4705-9312-fa6e816d7004/kube-rbac-proxy/0.log" Nov 26 15:36:37 crc kubenswrapper[5037]: I1126 15:36:37.843752 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-shv5p_4acfd23c-4a99-4705-9312-fa6e816d7004/manager/0.log" Nov 26 15:36:37 crc kubenswrapper[5037]: I1126 15:36:37.926090 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-q4qd8_a84b911c-ef23-4267-bfdb-0b9c9d8b9070/kube-rbac-proxy/0.log" Nov 26 15:36:38 crc kubenswrapper[5037]: I1126 15:36:38.028341 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-q4qd8_a84b911c-ef23-4267-bfdb-0b9c9d8b9070/manager/0.log" Nov 26 15:36:38 crc kubenswrapper[5037]: I1126 
15:36:38.127620 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-skfsl_a5ef500e-85f6-4655-af56-720d8e23d4b0/kube-rbac-proxy/0.log" Nov 26 15:36:38 crc kubenswrapper[5037]: I1126 15:36:38.172718 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-skfsl_a5ef500e-85f6-4655-af56-720d8e23d4b0/manager/0.log" Nov 26 15:36:38 crc kubenswrapper[5037]: I1126 15:36:38.198350 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-674cb676c8xwf2x_7311c1ce-321d-49a7-b616-6b8f3fb2ce8c/kube-rbac-proxy/0.log" Nov 26 15:36:38 crc kubenswrapper[5037]: I1126 15:36:38.384907 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-674cb676c8xwf2x_7311c1ce-321d-49a7-b616-6b8f3fb2ce8c/manager/0.log" Nov 26 15:36:38 crc kubenswrapper[5037]: I1126 15:36:38.602252 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-544fb75865-wjh78_ad997d1c-74cb-4e4b-bde6-6b5eefeb9332/operator/0.log" Nov 26 15:36:38 crc kubenswrapper[5037]: I1126 15:36:38.751823 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-zc8jb_3c07b8c9-2517-4830-9455-208774f73353/registry-server/0.log" Nov 26 15:36:38 crc kubenswrapper[5037]: I1126 15:36:38.805546 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-hn8b8_9aa6edf8-6550-4e67-a36c-c1821a4e0778/kube-rbac-proxy/0.log" Nov 26 15:36:38 crc kubenswrapper[5037]: I1126 15:36:38.929328 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-hn8b8_9aa6edf8-6550-4e67-a36c-c1821a4e0778/manager/0.log" Nov 26 15:36:38 crc kubenswrapper[5037]: I1126 15:36:38.947757 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-f5hkj_bd47a58c-4525-4ca4-9e18-9971afc83d7a/kube-rbac-proxy/0.log" Nov 26 15:36:39 crc kubenswrapper[5037]: I1126 15:36:39.022720 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-f5hkj_bd47a58c-4525-4ca4-9e18-9971afc83d7a/manager/0.log" Nov 26 15:36:39 crc kubenswrapper[5037]: I1126 15:36:39.051833 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-659d75f7c6-7qgq6_2446d4a3-2a56-4a21-9726-19cfcfcfd203/manager/0.log" Nov 26 15:36:39 crc kubenswrapper[5037]: I1126 15:36:39.159250 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-mjnxg_66ef6065-211b-4aa2-b2f5-6386ee020518/operator/0.log" Nov 26 15:36:39 crc kubenswrapper[5037]: I1126 15:36:39.229373 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-9dzl2_7b497253-ea07-43ac-a78f-d2a145344041/kube-rbac-proxy/0.log" Nov 26 15:36:39 crc kubenswrapper[5037]: I1126 15:36:39.269035 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-9dzl2_7b497253-ea07-43ac-a78f-d2a145344041/manager/0.log" Nov 26 15:36:39 crc 
kubenswrapper[5037]: I1126 15:36:39.355503 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-wrk4n_63e61192-9513-41ce-a7f9-983264d63ce8/kube-rbac-proxy/0.log" Nov 26 15:36:39 crc kubenswrapper[5037]: I1126 15:36:39.396668 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-wrk4n_63e61192-9513-41ce-a7f9-983264d63ce8/manager/0.log" Nov 26 15:36:39 crc kubenswrapper[5037]: I1126 15:36:39.445645 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-fcz7d_abb38dd1-fa1b-4056-85f7-2ebbe18977b9/kube-rbac-proxy/0.log" Nov 26 15:36:39 crc kubenswrapper[5037]: I1126 15:36:39.508479 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-fcz7d_abb38dd1-fa1b-4056-85f7-2ebbe18977b9/manager/0.log" Nov 26 15:36:39 crc kubenswrapper[5037]: I1126 15:36:39.570354 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-tsv6x_46f325c2-aa51-4684-aec4-0c31eb822e6d/manager/0.log" Nov 26 15:36:39 crc kubenswrapper[5037]: I1126 15:36:39.585365 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-tsv6x_46f325c2-aa51-4684-aec4-0c31eb822e6d/kube-rbac-proxy/0.log" Nov 26 15:36:56 crc kubenswrapper[5037]: I1126 15:36:56.205852 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-h6kmp"] Nov 26 15:36:56 crc kubenswrapper[5037]: I1126 15:36:56.209841 5037 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:36:56 crc kubenswrapper[5037]: I1126 15:36:56.219169 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h6kmp"] Nov 26 15:36:56 crc kubenswrapper[5037]: I1126 15:36:56.245849 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b44cq\" (UniqueName: \"kubernetes.io/projected/cb104e26-c359-4cda-b5c2-89391a64cb69-kube-api-access-b44cq\") pod \"redhat-operators-h6kmp\" (UID: \"cb104e26-c359-4cda-b5c2-89391a64cb69\") " pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:36:56 crc kubenswrapper[5037]: I1126 15:36:56.245941 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb104e26-c359-4cda-b5c2-89391a64cb69-catalog-content\") pod \"redhat-operators-h6kmp\" (UID: \"cb104e26-c359-4cda-b5c2-89391a64cb69\") " pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:36:56 crc kubenswrapper[5037]: I1126 15:36:56.245979 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb104e26-c359-4cda-b5c2-89391a64cb69-utilities\") pod \"redhat-operators-h6kmp\" (UID: \"cb104e26-c359-4cda-b5c2-89391a64cb69\") " pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:36:56 crc kubenswrapper[5037]: I1126 15:36:56.347642 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b44cq\" (UniqueName: \"kubernetes.io/projected/cb104e26-c359-4cda-b5c2-89391a64cb69-kube-api-access-b44cq\") pod \"redhat-operators-h6kmp\" (UID: \"cb104e26-c359-4cda-b5c2-89391a64cb69\") " pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:36:56 crc kubenswrapper[5037]: I1126 15:36:56.347772 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb104e26-c359-4cda-b5c2-89391a64cb69-catalog-content\") pod \"redhat-operators-h6kmp\" (UID: \"cb104e26-c359-4cda-b5c2-89391a64cb69\") " pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:36:56 crc kubenswrapper[5037]: I1126 15:36:56.347832 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb104e26-c359-4cda-b5c2-89391a64cb69-utilities\") pod \"redhat-operators-h6kmp\" (UID: \"cb104e26-c359-4cda-b5c2-89391a64cb69\") " pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:36:56 crc kubenswrapper[5037]: I1126 15:36:56.348446 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb104e26-c359-4cda-b5c2-89391a64cb69-catalog-content\") pod \"redhat-operators-h6kmp\" (UID: \"cb104e26-c359-4cda-b5c2-89391a64cb69\") " pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:36:56 crc kubenswrapper[5037]: I1126 15:36:56.348676 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb104e26-c359-4cda-b5c2-89391a64cb69-utilities\") pod \"redhat-operators-h6kmp\" (UID: \"cb104e26-c359-4cda-b5c2-89391a64cb69\") " pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:36:56 crc kubenswrapper[5037]: I1126 15:36:56.372658 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-b44cq\" (UniqueName: \"kubernetes.io/projected/cb104e26-c359-4cda-b5c2-89391a64cb69-kube-api-access-b44cq\") pod \"redhat-operators-h6kmp\" (UID: \"cb104e26-c359-4cda-b5c2-89391a64cb69\") " pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:36:56 crc kubenswrapper[5037]: I1126 15:36:56.555912 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:36:57 crc kubenswrapper[5037]: I1126 15:36:57.045174 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h6kmp"] Nov 26 15:36:57 crc kubenswrapper[5037]: I1126 15:36:57.589641 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h6kmp" event={"ID":"cb104e26-c359-4cda-b5c2-89391a64cb69","Type":"ContainerStarted","Data":"8fd1313f98e35dce5784b020a8dccf0a5daeae26dd416262b17ba7022035622c"} Nov 26 15:36:59 crc kubenswrapper[5037]: I1126 15:36:59.608224 5037 generic.go:334] "Generic (PLEG): container finished" podID="cb104e26-c359-4cda-b5c2-89391a64cb69" containerID="e95ce4c9c45c5a4e2dbe5a79ecefb3301a69b56db5ef367a0371c63a74004e47" exitCode=0 Nov 26 15:36:59 crc kubenswrapper[5037]: I1126 15:36:59.608327 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h6kmp" event={"ID":"cb104e26-c359-4cda-b5c2-89391a64cb69","Type":"ContainerDied","Data":"e95ce4c9c45c5a4e2dbe5a79ecefb3301a69b56db5ef367a0371c63a74004e47"} Nov 26 15:37:00 crc kubenswrapper[5037]: I1126 15:37:00.296653 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-g2jw7_a16c7c46-2c28-444d-8b7d-0ef797877620/control-plane-machine-set-operator/0.log" Nov 26 15:37:00 crc kubenswrapper[5037]: I1126 15:37:00.420795 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-l7mvc_c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7/kube-rbac-proxy/0.log" Nov 26 15:37:00 crc kubenswrapper[5037]: I1126 15:37:00.459332 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-l7mvc_c324fe4d-b0c6-4c0a-9dd7-10aa517dcce7/machine-api-operator/0.log" Nov 26 15:37:02 crc kubenswrapper[5037]: I1126 15:37:02.632843 5037 generic.go:334] "Generic (PLEG): container finished" podID="cb104e26-c359-4cda-b5c2-89391a64cb69" containerID="b449515090f15a524f982751aebc4eaa581976d595a7e19be3a2599cfd782fcb" exitCode=0 Nov 26 15:37:02 crc kubenswrapper[5037]: I1126 15:37:02.632940 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h6kmp" event={"ID":"cb104e26-c359-4cda-b5c2-89391a64cb69","Type":"ContainerDied","Data":"b449515090f15a524f982751aebc4eaa581976d595a7e19be3a2599cfd782fcb"} Nov 26 15:37:04 crc kubenswrapper[5037]: I1126 15:37:04.655655 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h6kmp" event={"ID":"cb104e26-c359-4cda-b5c2-89391a64cb69","Type":"ContainerStarted","Data":"67fc0444dca8617c3edc625c9224fa5ee94c38ff0385c40cf88959ee6dc3f94e"} Nov 26 15:37:04 crc kubenswrapper[5037]: I1126 15:37:04.680953 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-h6kmp" podStartSLOduration=4.353695983 podStartE2EDuration="8.680926253s" podCreationTimestamp="2025-11-26 15:36:56 +0000 UTC" firstStartedPulling="2025-11-26 15:36:59.609796546 +0000 
UTC m=+4886.406566730" lastFinishedPulling="2025-11-26 15:37:03.937026776 +0000 UTC m=+4890.733797000" observedRunningTime="2025-11-26 15:37:04.679659402 +0000 UTC m=+4891.476429637" watchObservedRunningTime="2025-11-26 15:37:04.680926253 +0000 UTC m=+4891.477696477" Nov 26 15:37:06 crc kubenswrapper[5037]: I1126 15:37:06.556351 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:37:06 crc kubenswrapper[5037]: I1126 15:37:06.556428 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:37:07 crc kubenswrapper[5037]: I1126 15:37:07.633234 5037 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h6kmp" podUID="cb104e26-c359-4cda-b5c2-89391a64cb69" containerName="registry-server" probeResult="failure" output=< Nov 26 15:37:07 crc kubenswrapper[5037]: timeout: failed to connect service ":50051" within 1s Nov 26 15:37:07 crc kubenswrapper[5037]: > Nov 26 15:37:11 crc kubenswrapper[5037]: I1126 15:37:11.247758 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:37:11 crc kubenswrapper[5037]: I1126 15:37:11.248184 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:37:14 crc kubenswrapper[5037]: I1126 15:37:14.823872 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-dgkx5_5301fea1-84e2-4c3c-abdc-dc6464184277/cert-manager-controller/0.log" Nov 26 15:37:14 crc kubenswrapper[5037]: I1126 15:37:14.881543 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-dvzww_2696cbc1-003d-4c5b-9346-2f5434393ed7/cert-manager-cainjector/0.log" Nov 26 15:37:14 crc kubenswrapper[5037]: I1126 15:37:14.903947 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-4j52c_fcdd281f-2196-4aa1-992a-5b275246be42/cert-manager-webhook/0.log" Nov 26 15:37:16 crc kubenswrapper[5037]: I1126 15:37:16.637966 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:37:16 crc kubenswrapper[5037]: I1126 15:37:16.715088 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:37:16 crc kubenswrapper[5037]: I1126 15:37:16.893326 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h6kmp"] Nov 26 15:37:17 crc kubenswrapper[5037]: I1126 15:37:17.747817 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h6kmp" podUID="cb104e26-c359-4cda-b5c2-89391a64cb69" containerName="registry-server" containerID="cri-o://67fc0444dca8617c3edc625c9224fa5ee94c38ff0385c40cf88959ee6dc3f94e" gracePeriod=2 Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.108787 5037 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.220245 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b44cq\" (UniqueName: \"kubernetes.io/projected/cb104e26-c359-4cda-b5c2-89391a64cb69-kube-api-access-b44cq\") pod \"cb104e26-c359-4cda-b5c2-89391a64cb69\" (UID: \"cb104e26-c359-4cda-b5c2-89391a64cb69\") " Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.220404 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb104e26-c359-4cda-b5c2-89391a64cb69-catalog-content\") pod \"cb104e26-c359-4cda-b5c2-89391a64cb69\" (UID: \"cb104e26-c359-4cda-b5c2-89391a64cb69\") " Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.220475 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb104e26-c359-4cda-b5c2-89391a64cb69-utilities\") pod \"cb104e26-c359-4cda-b5c2-89391a64cb69\" (UID: \"cb104e26-c359-4cda-b5c2-89391a64cb69\") " Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.221874 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb104e26-c359-4cda-b5c2-89391a64cb69-utilities" (OuterVolumeSpecName: "utilities") pod "cb104e26-c359-4cda-b5c2-89391a64cb69" (UID: "cb104e26-c359-4cda-b5c2-89391a64cb69"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.226130 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb104e26-c359-4cda-b5c2-89391a64cb69-kube-api-access-b44cq" (OuterVolumeSpecName: "kube-api-access-b44cq") pod "cb104e26-c359-4cda-b5c2-89391a64cb69" (UID: "cb104e26-c359-4cda-b5c2-89391a64cb69"). InnerVolumeSpecName "kube-api-access-b44cq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.322738 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b44cq\" (UniqueName: \"kubernetes.io/projected/cb104e26-c359-4cda-b5c2-89391a64cb69-kube-api-access-b44cq\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.322777 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb104e26-c359-4cda-b5c2-89391a64cb69-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.356358 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb104e26-c359-4cda-b5c2-89391a64cb69-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb104e26-c359-4cda-b5c2-89391a64cb69" (UID: "cb104e26-c359-4cda-b5c2-89391a64cb69"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.423912 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb104e26-c359-4cda-b5c2-89391a64cb69-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.758632 5037 generic.go:334] "Generic (PLEG): container finished" podID="cb104e26-c359-4cda-b5c2-89391a64cb69" containerID="67fc0444dca8617c3edc625c9224fa5ee94c38ff0385c40cf88959ee6dc3f94e" exitCode=0 Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.758745 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h6kmp" event={"ID":"cb104e26-c359-4cda-b5c2-89391a64cb69","Type":"ContainerDied","Data":"67fc0444dca8617c3edc625c9224fa5ee94c38ff0385c40cf88959ee6dc3f94e"} Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.759016 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h6kmp" event={"ID":"cb104e26-c359-4cda-b5c2-89391a64cb69","Type":"ContainerDied","Data":"8fd1313f98e35dce5784b020a8dccf0a5daeae26dd416262b17ba7022035622c"} Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.758757 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h6kmp" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.759053 5037 scope.go:117] "RemoveContainer" containerID="67fc0444dca8617c3edc625c9224fa5ee94c38ff0385c40cf88959ee6dc3f94e" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.795482 5037 scope.go:117] "RemoveContainer" containerID="b449515090f15a524f982751aebc4eaa581976d595a7e19be3a2599cfd782fcb" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.811733 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h6kmp"] Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.830185 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-h6kmp"] Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.840604 5037 scope.go:117] "RemoveContainer" containerID="e95ce4c9c45c5a4e2dbe5a79ecefb3301a69b56db5ef367a0371c63a74004e47" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.863482 5037 scope.go:117] "RemoveContainer" containerID="67fc0444dca8617c3edc625c9224fa5ee94c38ff0385c40cf88959ee6dc3f94e" Nov 26 15:37:18 crc kubenswrapper[5037]: E1126 15:37:18.863979 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67fc0444dca8617c3edc625c9224fa5ee94c38ff0385c40cf88959ee6dc3f94e\": container with ID starting with 67fc0444dca8617c3edc625c9224fa5ee94c38ff0385c40cf88959ee6dc3f94e not found: ID does not exist" containerID="67fc0444dca8617c3edc625c9224fa5ee94c38ff0385c40cf88959ee6dc3f94e" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.864021 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67fc0444dca8617c3edc625c9224fa5ee94c38ff0385c40cf88959ee6dc3f94e"} err="failed to get container status \"67fc0444dca8617c3edc625c9224fa5ee94c38ff0385c40cf88959ee6dc3f94e\": rpc error: code = NotFound desc = could not find container \"67fc0444dca8617c3edc625c9224fa5ee94c38ff0385c40cf88959ee6dc3f94e\": container with ID starting with 67fc0444dca8617c3edc625c9224fa5ee94c38ff0385c40cf88959ee6dc3f94e not found: ID does not exist" Nov 26 15:37:18 crc 
kubenswrapper[5037]: I1126 15:37:18.864046 5037 scope.go:117] "RemoveContainer" containerID="b449515090f15a524f982751aebc4eaa581976d595a7e19be3a2599cfd782fcb" Nov 26 15:37:18 crc kubenswrapper[5037]: E1126 15:37:18.864436 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b449515090f15a524f982751aebc4eaa581976d595a7e19be3a2599cfd782fcb\": container with ID starting with b449515090f15a524f982751aebc4eaa581976d595a7e19be3a2599cfd782fcb not found: ID does not exist" containerID="b449515090f15a524f982751aebc4eaa581976d595a7e19be3a2599cfd782fcb" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.864457 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b449515090f15a524f982751aebc4eaa581976d595a7e19be3a2599cfd782fcb"} err="failed to get container status \"b449515090f15a524f982751aebc4eaa581976d595a7e19be3a2599cfd782fcb\": rpc error: code = NotFound desc = could not find container \"b449515090f15a524f982751aebc4eaa581976d595a7e19be3a2599cfd782fcb\": container with ID starting with b449515090f15a524f982751aebc4eaa581976d595a7e19be3a2599cfd782fcb not found: ID does not exist" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.864474 5037 scope.go:117] "RemoveContainer" containerID="e95ce4c9c45c5a4e2dbe5a79ecefb3301a69b56db5ef367a0371c63a74004e47" Nov 26 15:37:18 crc kubenswrapper[5037]: E1126 15:37:18.864787 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e95ce4c9c45c5a4e2dbe5a79ecefb3301a69b56db5ef367a0371c63a74004e47\": container with ID starting with e95ce4c9c45c5a4e2dbe5a79ecefb3301a69b56db5ef367a0371c63a74004e47 not found: ID does not exist" containerID="e95ce4c9c45c5a4e2dbe5a79ecefb3301a69b56db5ef367a0371c63a74004e47" Nov 26 15:37:18 crc kubenswrapper[5037]: I1126 15:37:18.864813 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e95ce4c9c45c5a4e2dbe5a79ecefb3301a69b56db5ef367a0371c63a74004e47"} err="failed to get container status \"e95ce4c9c45c5a4e2dbe5a79ecefb3301a69b56db5ef367a0371c63a74004e47\": rpc error: code = NotFound desc = could not find container \"e95ce4c9c45c5a4e2dbe5a79ecefb3301a69b56db5ef367a0371c63a74004e47\": container with ID starting with e95ce4c9c45c5a4e2dbe5a79ecefb3301a69b56db5ef367a0371c63a74004e47 not found: ID does not exist" Nov 26 15:37:19 crc kubenswrapper[5037]: I1126 15:37:19.924514 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb104e26-c359-4cda-b5c2-89391a64cb69" path="/var/lib/kubelet/pods/cb104e26-c359-4cda-b5c2-89391a64cb69/volumes" Nov 26 15:37:27 crc kubenswrapper[5037]: I1126 15:37:27.353931 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-74fql_fbba51e7-c4f1-4211-a970-f299ef0a6ed9/nmstate-console-plugin/0.log" Nov 26 15:37:27 crc kubenswrapper[5037]: I1126 15:37:27.518085 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-mhw28_db122af0-7421-424b-8de3-2f463d65cbdc/nmstate-handler/0.log" Nov 26 15:37:27 crc kubenswrapper[5037]: I1126 15:37:27.559029 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-w2xlk_4c1a7ee3-c2c6-496d-a366-5b3e4da21c04/kube-rbac-proxy/0.log" Nov 26 15:37:27 crc kubenswrapper[5037]: I1126 15:37:27.560680 5037 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-w2xlk_4c1a7ee3-c2c6-496d-a366-5b3e4da21c04/nmstate-metrics/0.log" Nov 26 15:37:27 crc kubenswrapper[5037]: I1126 15:37:27.736128 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-s2jcr_2e990df6-f7f3-4d6b-9a15-ea5f85abdb66/nmstate-webhook/0.log" Nov 26 15:37:27 crc kubenswrapper[5037]: I1126 15:37:27.745122 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-bqpl4_abe32f75-6048-4e43-bd89-6389ac78f149/nmstate-operator/0.log" Nov 26 15:37:41 crc kubenswrapper[5037]: I1126 15:37:41.247331 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:37:41 crc kubenswrapper[5037]: I1126 15:37:41.248790 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:37:42 crc kubenswrapper[5037]: I1126 15:37:42.801505 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-whwvq_568c6e76-bd77-4cdb-a947-faf6537e5a41/kube-rbac-proxy/0.log" Nov 26 15:37:42 crc kubenswrapper[5037]: I1126 15:37:42.976835 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/cp-frr-files/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.183926 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-whwvq_568c6e76-bd77-4cdb-a947-faf6537e5a41/controller/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.200391 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/cp-frr-files/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.210610 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/cp-reloader/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.272236 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/cp-metrics/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.360471 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/cp-reloader/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.493142 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/cp-frr-files/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.507137 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/cp-reloader/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.543963 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/cp-metrics/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 
15:37:43.583740 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/cp-metrics/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.771800 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/cp-reloader/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.771941 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/cp-frr-files/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.779524 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/controller/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.821769 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/cp-metrics/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.944543 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/kube-rbac-proxy/0.log" Nov 26 15:37:43 crc kubenswrapper[5037]: I1126 15:37:43.969592 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/frr-metrics/0.log" Nov 26 15:37:44 crc kubenswrapper[5037]: I1126 15:37:44.012595 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/kube-rbac-proxy-frr/0.log" Nov 26 15:37:44 crc kubenswrapper[5037]: I1126 15:37:44.119876 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/reloader/0.log" Nov 26 15:37:44 crc kubenswrapper[5037]: I1126 15:37:44.178305 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-lxggf_933638aa-7d2b-4e93-b969-42484711d78c/frr-k8s-webhook-server/0.log" Nov 26 15:37:44 crc kubenswrapper[5037]: I1126 15:37:44.411682 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-67bfdbb67f-dk98r_b98b1801-9296-470e-b171-7923029c8747/manager/0.log" Nov 26 15:37:44 crc kubenswrapper[5037]: I1126 15:37:44.500132 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-f4db46f7f-5dxzf_75840ff3-188c-49b7-8dc4-0f52d981a5d8/webhook-server/0.log" Nov 26 15:37:44 crc kubenswrapper[5037]: I1126 15:37:44.679777 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-mcjkn_bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45/kube-rbac-proxy/0.log" Nov 26 15:37:45 crc kubenswrapper[5037]: I1126 15:37:45.299726 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-mcjkn_bd6f7b35-4e97-4a38-ba5e-e4281e0d4b45/speaker/0.log" Nov 26 15:37:45 crc kubenswrapper[5037]: I1126 15:37:45.344328 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qp4qk_a8dac2bd-68ba-4cab-8119-051f1d14219f/frr/0.log" Nov 26 15:37:59 crc kubenswrapper[5037]: I1126 15:37:59.185853 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb_9ec4a60b-dff4-466f-815c-881dfc3b73aa/util/0.log" Nov 26 15:37:59 crc kubenswrapper[5037]: I1126 
15:37:59.390766 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb_9ec4a60b-dff4-466f-815c-881dfc3b73aa/pull/0.log" Nov 26 15:37:59 crc kubenswrapper[5037]: I1126 15:37:59.395103 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb_9ec4a60b-dff4-466f-815c-881dfc3b73aa/pull/0.log" Nov 26 15:37:59 crc kubenswrapper[5037]: I1126 15:37:59.424078 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb_9ec4a60b-dff4-466f-815c-881dfc3b73aa/util/0.log" Nov 26 15:37:59 crc kubenswrapper[5037]: I1126 15:37:59.552527 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb_9ec4a60b-dff4-466f-815c-881dfc3b73aa/util/0.log" Nov 26 15:37:59 crc kubenswrapper[5037]: I1126 15:37:59.630607 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb_9ec4a60b-dff4-466f-815c-881dfc3b73aa/pull/0.log" Nov 26 15:37:59 crc kubenswrapper[5037]: I1126 15:37:59.648656 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931axsndb_9ec4a60b-dff4-466f-815c-881dfc3b73aa/extract/0.log" Nov 26 15:37:59 crc kubenswrapper[5037]: I1126 15:37:59.760098 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62_d819d36f-4bb5-4baf-aa77-76cf0554d458/util/0.log" Nov 26 15:37:59 crc kubenswrapper[5037]: I1126 15:37:59.928255 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62_d819d36f-4bb5-4baf-aa77-76cf0554d458/pull/0.log" Nov 26 15:37:59 crc kubenswrapper[5037]: I1126 15:37:59.938120 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62_d819d36f-4bb5-4baf-aa77-76cf0554d458/util/0.log" Nov 26 15:37:59 crc kubenswrapper[5037]: I1126 15:37:59.972886 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62_d819d36f-4bb5-4baf-aa77-76cf0554d458/pull/0.log" Nov 26 15:38:00 crc kubenswrapper[5037]: I1126 15:38:00.135478 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62_d819d36f-4bb5-4baf-aa77-76cf0554d458/extract/0.log" Nov 26 15:38:00 crc kubenswrapper[5037]: I1126 15:38:00.157922 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62_d819d36f-4bb5-4baf-aa77-76cf0554d458/util/0.log" Nov 26 15:38:00 crc kubenswrapper[5037]: I1126 15:38:00.176216 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772egfv62_d819d36f-4bb5-4baf-aa77-76cf0554d458/pull/0.log" Nov 26 15:38:00 crc kubenswrapper[5037]: I1126 15:38:00.307346 5037 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-cslxk_670f1e26-c826-4296-b4f9-5b14ce2a3aa4/extract-utilities/0.log" Nov 26 15:38:00 crc kubenswrapper[5037]: I1126 15:38:00.478067 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cslxk_670f1e26-c826-4296-b4f9-5b14ce2a3aa4/extract-content/0.log" Nov 26 15:38:00 crc kubenswrapper[5037]: I1126 15:38:00.496063 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cslxk_670f1e26-c826-4296-b4f9-5b14ce2a3aa4/extract-utilities/0.log" Nov 26 15:38:00 crc kubenswrapper[5037]: I1126 15:38:00.530808 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cslxk_670f1e26-c826-4296-b4f9-5b14ce2a3aa4/extract-content/0.log" Nov 26 15:38:00 crc kubenswrapper[5037]: I1126 15:38:00.637490 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cslxk_670f1e26-c826-4296-b4f9-5b14ce2a3aa4/extract-utilities/0.log" Nov 26 15:38:00 crc kubenswrapper[5037]: I1126 15:38:00.678066 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cslxk_670f1e26-c826-4296-b4f9-5b14ce2a3aa4/extract-content/0.log" Nov 26 15:38:00 crc kubenswrapper[5037]: I1126 15:38:00.838211 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rfv8m_469e3981-b529-4082-b10d-8b4442a7e7e4/extract-utilities/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.080984 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rfv8m_469e3981-b529-4082-b10d-8b4442a7e7e4/extract-content/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.095471 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rfv8m_469e3981-b529-4082-b10d-8b4442a7e7e4/extract-content/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.108096 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rfv8m_469e3981-b529-4082-b10d-8b4442a7e7e4/extract-utilities/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.128598 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cslxk_670f1e26-c826-4296-b4f9-5b14ce2a3aa4/registry-server/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.234470 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rfv8m_469e3981-b529-4082-b10d-8b4442a7e7e4/extract-utilities/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.285057 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rfv8m_469e3981-b529-4082-b10d-8b4442a7e7e4/extract-content/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.440349 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw_2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4/util/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.545677 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rfv8m_469e3981-b529-4082-b10d-8b4442a7e7e4/registry-server/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.600741 5037 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw_2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4/util/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.608322 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw_2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4/pull/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.646100 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw_2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4/pull/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.854548 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw_2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4/extract/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.854731 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw_2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4/util/0.log" Nov 26 15:38:01 crc kubenswrapper[5037]: I1126 15:38:01.876128 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6kkjdw_2e563802-c1fb-45ac-bdb0-3b4ffcc34fd4/pull/0.log" Nov 26 15:38:02 crc kubenswrapper[5037]: I1126 15:38:02.014670 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-xf9zc_04c1788d-27d1-4615-9147-c4dc6fad86bb/marketplace-operator/0.log" Nov 26 15:38:02 crc kubenswrapper[5037]: I1126 15:38:02.089164 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-24x6j_f458ec30-f647-43d3-9a84-c611ebeb918d/extract-utilities/0.log" Nov 26 15:38:02 crc kubenswrapper[5037]: I1126 15:38:02.264169 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-24x6j_f458ec30-f647-43d3-9a84-c611ebeb918d/extract-content/0.log" Nov 26 15:38:02 crc kubenswrapper[5037]: I1126 15:38:02.272149 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-24x6j_f458ec30-f647-43d3-9a84-c611ebeb918d/extract-content/0.log" Nov 26 15:38:02 crc kubenswrapper[5037]: I1126 15:38:02.279523 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-24x6j_f458ec30-f647-43d3-9a84-c611ebeb918d/extract-utilities/0.log" Nov 26 15:38:02 crc kubenswrapper[5037]: I1126 15:38:02.451136 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-24x6j_f458ec30-f647-43d3-9a84-c611ebeb918d/extract-content/0.log" Nov 26 15:38:02 crc kubenswrapper[5037]: I1126 15:38:02.474111 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-24x6j_f458ec30-f647-43d3-9a84-c611ebeb918d/extract-utilities/0.log" Nov 26 15:38:02 crc kubenswrapper[5037]: I1126 15:38:02.524137 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nsmvk_5cb26ae3-4fcb-4bcb-8118-1471510b9589/extract-utilities/0.log" Nov 26 15:38:02 crc kubenswrapper[5037]: I1126 15:38:02.697122 5037 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-24x6j_f458ec30-f647-43d3-9a84-c611ebeb918d/registry-server/0.log" Nov 26 15:38:02 crc kubenswrapper[5037]: I1126 15:38:02.719157 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nsmvk_5cb26ae3-4fcb-4bcb-8118-1471510b9589/extract-content/0.log" Nov 26 15:38:02 crc kubenswrapper[5037]: I1126 15:38:02.745137 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nsmvk_5cb26ae3-4fcb-4bcb-8118-1471510b9589/extract-utilities/0.log" Nov 26 15:38:02 crc kubenswrapper[5037]: I1126 15:38:02.906468 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nsmvk_5cb26ae3-4fcb-4bcb-8118-1471510b9589/extract-content/0.log" Nov 26 15:38:03 crc kubenswrapper[5037]: I1126 15:38:03.051703 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nsmvk_5cb26ae3-4fcb-4bcb-8118-1471510b9589/extract-utilities/0.log" Nov 26 15:38:03 crc kubenswrapper[5037]: I1126 15:38:03.066924 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nsmvk_5cb26ae3-4fcb-4bcb-8118-1471510b9589/extract-content/0.log" Nov 26 15:38:03 crc kubenswrapper[5037]: I1126 15:38:03.628865 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nsmvk_5cb26ae3-4fcb-4bcb-8118-1471510b9589/registry-server/0.log" Nov 26 15:38:11 crc kubenswrapper[5037]: I1126 15:38:11.247360 5037 patch_prober.go:28] interesting pod/machine-config-daemon-8jk2d container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:38:11 crc kubenswrapper[5037]: I1126 15:38:11.249347 5037 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:38:11 crc kubenswrapper[5037]: I1126 15:38:11.249568 5037 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" Nov 26 15:38:11 crc kubenswrapper[5037]: I1126 15:38:11.250843 5037 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67"} pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 15:38:11 crc kubenswrapper[5037]: I1126 15:38:11.251097 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerName="machine-config-daemon" containerID="cri-o://2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67" gracePeriod=600 Nov 26 15:38:11 crc kubenswrapper[5037]: E1126 15:38:11.386233 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
Nov 26 15:38:11 crc kubenswrapper[5037]: E1126 15:38:11.386233 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:38:12 crc kubenswrapper[5037]: I1126 15:38:12.154928 5037 generic.go:334] "Generic (PLEG): container finished" podID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67" exitCode=0
Nov 26 15:38:12 crc kubenswrapper[5037]: I1126 15:38:12.155094 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" event={"ID":"8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb","Type":"ContainerDied","Data":"2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67"}
Nov 26 15:38:12 crc kubenswrapper[5037]: I1126 15:38:12.155430 5037 scope.go:117] "RemoveContainer" containerID="3eb0af632cc19ef89f31373c09fe518114a27d0fe325b09569e3a3babb449aa7"
Nov 26 15:38:12 crc kubenswrapper[5037]: I1126 15:38:12.156349 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67"
Nov 26 15:38:12 crc kubenswrapper[5037]: E1126 15:38:12.156777 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.344473 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5srvd"]
Nov 26 15:38:24 crc kubenswrapper[5037]: E1126 15:38:24.345547 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb104e26-c359-4cda-b5c2-89391a64cb69" containerName="registry-server"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.345570 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb104e26-c359-4cda-b5c2-89391a64cb69" containerName="registry-server"
Nov 26 15:38:24 crc kubenswrapper[5037]: E1126 15:38:24.345609 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb104e26-c359-4cda-b5c2-89391a64cb69" containerName="extract-utilities"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.345621 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb104e26-c359-4cda-b5c2-89391a64cb69" containerName="extract-utilities"
Nov 26 15:38:24 crc kubenswrapper[5037]: E1126 15:38:24.345653 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb104e26-c359-4cda-b5c2-89391a64cb69" containerName="extract-content"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.345667 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb104e26-c359-4cda-b5c2-89391a64cb69" containerName="extract-content"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.345924 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb104e26-c359-4cda-b5c2-89391a64cb69" containerName="registry-server"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.347696 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.375383 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5srvd"]
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.422632 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8trzm\" (UniqueName: \"kubernetes.io/projected/64627b3c-762b-4568-a285-9eedaadda768-kube-api-access-8trzm\") pod \"redhat-marketplace-5srvd\" (UID: \"64627b3c-762b-4568-a285-9eedaadda768\") " pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.422712 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64627b3c-762b-4568-a285-9eedaadda768-catalog-content\") pod \"redhat-marketplace-5srvd\" (UID: \"64627b3c-762b-4568-a285-9eedaadda768\") " pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.422777 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64627b3c-762b-4568-a285-9eedaadda768-utilities\") pod \"redhat-marketplace-5srvd\" (UID: \"64627b3c-762b-4568-a285-9eedaadda768\") " pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.523725 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64627b3c-762b-4568-a285-9eedaadda768-catalog-content\") pod \"redhat-marketplace-5srvd\" (UID: \"64627b3c-762b-4568-a285-9eedaadda768\") " pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.523767 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64627b3c-762b-4568-a285-9eedaadda768-utilities\") pod \"redhat-marketplace-5srvd\" (UID: \"64627b3c-762b-4568-a285-9eedaadda768\") " pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.523833 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8trzm\" (UniqueName: \"kubernetes.io/projected/64627b3c-762b-4568-a285-9eedaadda768-kube-api-access-8trzm\") pod \"redhat-marketplace-5srvd\" (UID: \"64627b3c-762b-4568-a285-9eedaadda768\") " pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.524169 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64627b3c-762b-4568-a285-9eedaadda768-catalog-content\") pod \"redhat-marketplace-5srvd\" (UID: \"64627b3c-762b-4568-a285-9eedaadda768\") " pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.524401 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64627b3c-762b-4568-a285-9eedaadda768-utilities\") pod \"redhat-marketplace-5srvd\" (UID: \"64627b3c-762b-4568-a285-9eedaadda768\") " pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.552277 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8trzm\" (UniqueName: \"kubernetes.io/projected/64627b3c-762b-4568-a285-9eedaadda768-kube-api-access-8trzm\") pod \"redhat-marketplace-5srvd\" (UID: \"64627b3c-762b-4568-a285-9eedaadda768\") " pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.731619 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:24 crc kubenswrapper[5037]: I1126 15:38:24.907907 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67"
Nov 26 15:38:24 crc kubenswrapper[5037]: E1126 15:38:24.908331 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:38:25 crc kubenswrapper[5037]: I1126 15:38:25.208261 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5srvd"]
Nov 26 15:38:25 crc kubenswrapper[5037]: I1126 15:38:25.264496 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5srvd" event={"ID":"64627b3c-762b-4568-a285-9eedaadda768","Type":"ContainerStarted","Data":"62113822ad0d2c2b14642efeefe49f45ed8383fcadbb36e13c0eec61d88817c9"}
Nov 26 15:38:26 crc kubenswrapper[5037]: I1126 15:38:26.276077 5037 generic.go:334] "Generic (PLEG): container finished" podID="64627b3c-762b-4568-a285-9eedaadda768" containerID="69d0cd2a08a3775b460764c438eff3c798ce4de2a8e6a9b1527dd8a042a3a58e" exitCode=0
Nov 26 15:38:26 crc kubenswrapper[5037]: I1126 15:38:26.276162 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5srvd" event={"ID":"64627b3c-762b-4568-a285-9eedaadda768","Type":"ContainerDied","Data":"69d0cd2a08a3775b460764c438eff3c798ce4de2a8e6a9b1527dd8a042a3a58e"}
Nov 26 15:38:27 crc kubenswrapper[5037]: I1126 15:38:27.288405 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5srvd" event={"ID":"64627b3c-762b-4568-a285-9eedaadda768","Type":"ContainerStarted","Data":"09537a68a251538168ba996ebdc271abc992b931f5bfb640ead39a571988c980"}
Nov 26 15:38:28 crc kubenswrapper[5037]: I1126 15:38:28.300197 5037 generic.go:334] "Generic (PLEG): container finished" podID="64627b3c-762b-4568-a285-9eedaadda768" containerID="09537a68a251538168ba996ebdc271abc992b931f5bfb640ead39a571988c980" exitCode=0
Nov 26 15:38:28 crc kubenswrapper[5037]: I1126 15:38:28.300245 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5srvd" event={"ID":"64627b3c-762b-4568-a285-9eedaadda768","Type":"ContainerDied","Data":"09537a68a251538168ba996ebdc271abc992b931f5bfb640ead39a571988c980"}
Nov 26 15:38:29 crc kubenswrapper[5037]: I1126 15:38:29.313585 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5srvd" event={"ID":"64627b3c-762b-4568-a285-9eedaadda768","Type":"ContainerStarted","Data":"93aca8e39dfd8c1881e189e41f3ebc05c5230ea216b68aff5a6ef28e6e75cce2"}
Nov 26 15:38:29 crc kubenswrapper[5037]: I1126 15:38:29.360168 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5srvd" podStartSLOduration=2.5882851110000002 podStartE2EDuration="5.360140776s" podCreationTimestamp="2025-11-26 15:38:24 +0000 UTC" firstStartedPulling="2025-11-26 15:38:26.278953672 +0000 UTC m=+4973.075723886" lastFinishedPulling="2025-11-26 15:38:29.050809337 +0000 UTC m=+4975.847579551" observedRunningTime="2025-11-26 15:38:29.347643904 +0000 UTC m=+4976.144414158" watchObservedRunningTime="2025-11-26 15:38:29.360140776 +0000 UTC m=+4976.156911000"
Nov 26 15:38:34 crc kubenswrapper[5037]: I1126 15:38:34.731995 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:34 crc kubenswrapper[5037]: I1126 15:38:34.734826 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:34 crc kubenswrapper[5037]: I1126 15:38:34.788572 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:35 crc kubenswrapper[5037]: I1126 15:38:35.465155 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5srvd"
Nov 26 15:38:35 crc kubenswrapper[5037]: I1126 15:38:35.527028 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5srvd"]
Nov 26 15:38:36 crc kubenswrapper[5037]: I1126 15:38:36.908701 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67"
Nov 26 15:38:36 crc kubenswrapper[5037]: E1126 15:38:36.909137 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:38:37 crc kubenswrapper[5037]: I1126 15:38:37.389995 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5srvd" podUID="64627b3c-762b-4568-a285-9eedaadda768" containerName="registry-server" containerID="cri-o://93aca8e39dfd8c1881e189e41f3ebc05c5230ea216b68aff5a6ef28e6e75cce2" gracePeriod=2
Nov 26 15:38:37 crc kubenswrapper[5037]: I1126 15:38:37.852072 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5srvd"
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:38:37 crc kubenswrapper[5037]: I1126 15:38:37.953666 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64627b3c-762b-4568-a285-9eedaadda768-utilities\") pod \"64627b3c-762b-4568-a285-9eedaadda768\" (UID: \"64627b3c-762b-4568-a285-9eedaadda768\") " Nov 26 15:38:37 crc kubenswrapper[5037]: I1126 15:38:37.953814 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64627b3c-762b-4568-a285-9eedaadda768-catalog-content\") pod \"64627b3c-762b-4568-a285-9eedaadda768\" (UID: \"64627b3c-762b-4568-a285-9eedaadda768\") " Nov 26 15:38:37 crc kubenswrapper[5037]: I1126 15:38:37.960989 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8trzm\" (UniqueName: \"kubernetes.io/projected/64627b3c-762b-4568-a285-9eedaadda768-kube-api-access-8trzm\") pod \"64627b3c-762b-4568-a285-9eedaadda768\" (UID: \"64627b3c-762b-4568-a285-9eedaadda768\") " Nov 26 15:38:37 crc kubenswrapper[5037]: I1126 15:38:37.961780 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64627b3c-762b-4568-a285-9eedaadda768-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:37 crc kubenswrapper[5037]: I1126 15:38:37.967045 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64627b3c-762b-4568-a285-9eedaadda768-kube-api-access-8trzm" (OuterVolumeSpecName: "kube-api-access-8trzm") pod "64627b3c-762b-4568-a285-9eedaadda768" (UID: "64627b3c-762b-4568-a285-9eedaadda768"). InnerVolumeSpecName "kube-api-access-8trzm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:38:37 crc kubenswrapper[5037]: I1126 15:38:37.977245 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64627b3c-762b-4568-a285-9eedaadda768-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "64627b3c-762b-4568-a285-9eedaadda768" (UID: "64627b3c-762b-4568-a285-9eedaadda768"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.063189 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64627b3c-762b-4568-a285-9eedaadda768-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.063228 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8trzm\" (UniqueName: \"kubernetes.io/projected/64627b3c-762b-4568-a285-9eedaadda768-kube-api-access-8trzm\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.398222 5037 generic.go:334] "Generic (PLEG): container finished" podID="64627b3c-762b-4568-a285-9eedaadda768" containerID="93aca8e39dfd8c1881e189e41f3ebc05c5230ea216b68aff5a6ef28e6e75cce2" exitCode=0 Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.398397 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5srvd" event={"ID":"64627b3c-762b-4568-a285-9eedaadda768","Type":"ContainerDied","Data":"93aca8e39dfd8c1881e189e41f3ebc05c5230ea216b68aff5a6ef28e6e75cce2"} Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.398590 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5srvd" event={"ID":"64627b3c-762b-4568-a285-9eedaadda768","Type":"ContainerDied","Data":"62113822ad0d2c2b14642efeefe49f45ed8383fcadbb36e13c0eec61d88817c9"} Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.398496 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5srvd" Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.398619 5037 scope.go:117] "RemoveContainer" containerID="93aca8e39dfd8c1881e189e41f3ebc05c5230ea216b68aff5a6ef28e6e75cce2" Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.432093 5037 scope.go:117] "RemoveContainer" containerID="09537a68a251538168ba996ebdc271abc992b931f5bfb640ead39a571988c980" Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.434474 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5srvd"] Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.440669 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5srvd"] Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.621864 5037 scope.go:117] "RemoveContainer" containerID="69d0cd2a08a3775b460764c438eff3c798ce4de2a8e6a9b1527dd8a042a3a58e" Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.673994 5037 scope.go:117] "RemoveContainer" containerID="93aca8e39dfd8c1881e189e41f3ebc05c5230ea216b68aff5a6ef28e6e75cce2" Nov 26 15:38:38 crc kubenswrapper[5037]: E1126 15:38:38.674380 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93aca8e39dfd8c1881e189e41f3ebc05c5230ea216b68aff5a6ef28e6e75cce2\": container with ID starting with 93aca8e39dfd8c1881e189e41f3ebc05c5230ea216b68aff5a6ef28e6e75cce2 not found: ID does not exist" containerID="93aca8e39dfd8c1881e189e41f3ebc05c5230ea216b68aff5a6ef28e6e75cce2" Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.674421 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93aca8e39dfd8c1881e189e41f3ebc05c5230ea216b68aff5a6ef28e6e75cce2"} err="failed to get container status 
\"93aca8e39dfd8c1881e189e41f3ebc05c5230ea216b68aff5a6ef28e6e75cce2\": rpc error: code = NotFound desc = could not find container \"93aca8e39dfd8c1881e189e41f3ebc05c5230ea216b68aff5a6ef28e6e75cce2\": container with ID starting with 93aca8e39dfd8c1881e189e41f3ebc05c5230ea216b68aff5a6ef28e6e75cce2 not found: ID does not exist" Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.674450 5037 scope.go:117] "RemoveContainer" containerID="09537a68a251538168ba996ebdc271abc992b931f5bfb640ead39a571988c980" Nov 26 15:38:38 crc kubenswrapper[5037]: E1126 15:38:38.674942 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09537a68a251538168ba996ebdc271abc992b931f5bfb640ead39a571988c980\": container with ID starting with 09537a68a251538168ba996ebdc271abc992b931f5bfb640ead39a571988c980 not found: ID does not exist" containerID="09537a68a251538168ba996ebdc271abc992b931f5bfb640ead39a571988c980" Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.674969 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09537a68a251538168ba996ebdc271abc992b931f5bfb640ead39a571988c980"} err="failed to get container status \"09537a68a251538168ba996ebdc271abc992b931f5bfb640ead39a571988c980\": rpc error: code = NotFound desc = could not find container \"09537a68a251538168ba996ebdc271abc992b931f5bfb640ead39a571988c980\": container with ID starting with 09537a68a251538168ba996ebdc271abc992b931f5bfb640ead39a571988c980 not found: ID does not exist" Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.674989 5037 scope.go:117] "RemoveContainer" containerID="69d0cd2a08a3775b460764c438eff3c798ce4de2a8e6a9b1527dd8a042a3a58e" Nov 26 15:38:38 crc kubenswrapper[5037]: E1126 15:38:38.675624 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69d0cd2a08a3775b460764c438eff3c798ce4de2a8e6a9b1527dd8a042a3a58e\": container with ID starting with 69d0cd2a08a3775b460764c438eff3c798ce4de2a8e6a9b1527dd8a042a3a58e not found: ID does not exist" containerID="69d0cd2a08a3775b460764c438eff3c798ce4de2a8e6a9b1527dd8a042a3a58e" Nov 26 15:38:38 crc kubenswrapper[5037]: I1126 15:38:38.675723 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69d0cd2a08a3775b460764c438eff3c798ce4de2a8e6a9b1527dd8a042a3a58e"} err="failed to get container status \"69d0cd2a08a3775b460764c438eff3c798ce4de2a8e6a9b1527dd8a042a3a58e\": rpc error: code = NotFound desc = could not find container \"69d0cd2a08a3775b460764c438eff3c798ce4de2a8e6a9b1527dd8a042a3a58e\": container with ID starting with 69d0cd2a08a3775b460764c438eff3c798ce4de2a8e6a9b1527dd8a042a3a58e not found: ID does not exist" Nov 26 15:38:39 crc kubenswrapper[5037]: I1126 15:38:39.925404 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64627b3c-762b-4568-a285-9eedaadda768" path="/var/lib/kubelet/pods/64627b3c-762b-4568-a285-9eedaadda768/volumes" Nov 26 15:38:51 crc kubenswrapper[5037]: I1126 15:38:51.908468 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67" Nov 26 15:38:51 crc kubenswrapper[5037]: E1126 15:38:51.909631 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 26 15:38:51 crc kubenswrapper[5037]: E1126 15:38:51.909631 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.012242 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-76brr"]
Nov 26 15:38:55 crc kubenswrapper[5037]: E1126 15:38:55.012890 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64627b3c-762b-4568-a285-9eedaadda768" containerName="registry-server"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.012902 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="64627b3c-762b-4568-a285-9eedaadda768" containerName="registry-server"
Nov 26 15:38:55 crc kubenswrapper[5037]: E1126 15:38:55.012926 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64627b3c-762b-4568-a285-9eedaadda768" containerName="extract-content"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.012932 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="64627b3c-762b-4568-a285-9eedaadda768" containerName="extract-content"
Nov 26 15:38:55 crc kubenswrapper[5037]: E1126 15:38:55.012942 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64627b3c-762b-4568-a285-9eedaadda768" containerName="extract-utilities"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.012949 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="64627b3c-762b-4568-a285-9eedaadda768" containerName="extract-utilities"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.013081 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="64627b3c-762b-4568-a285-9eedaadda768" containerName="registry-server"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.014031 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.029498 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-76brr"]
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.208367 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-catalog-content\") pod \"community-operators-76brr\" (UID: \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\") " pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.208422 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xftpw\" (UniqueName: \"kubernetes.io/projected/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-kube-api-access-xftpw\") pod \"community-operators-76brr\" (UID: \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\") " pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.208458 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-utilities\") pod \"community-operators-76brr\" (UID: \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\") " pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.309772 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xftpw\" (UniqueName: \"kubernetes.io/projected/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-kube-api-access-xftpw\") pod \"community-operators-76brr\" (UID: \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\") " pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.310154 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-utilities\") pod \"community-operators-76brr\" (UID: \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\") " pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.310406 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-catalog-content\") pod \"community-operators-76brr\" (UID: \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\") " pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.310729 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-utilities\") pod \"community-operators-76brr\" (UID: \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\") " pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.310781 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-catalog-content\") pod \"community-operators-76brr\" (UID: \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\") " pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.330213 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xftpw\" (UniqueName: \"kubernetes.io/projected/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-kube-api-access-xftpw\") pod \"community-operators-76brr\" (UID: \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\") " pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.341866 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:38:55 crc kubenswrapper[5037]: I1126 15:38:55.854267 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-76brr"]
Nov 26 15:38:55 crc kubenswrapper[5037]: W1126 15:38:55.863833 5037 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a2efbb6_cf00_4fad_a36f_c82fb4bab849.slice/crio-d7aada8e716b2f600f480e4c43a21df6478112b5a89da4f16d8c5557787a2a91 WatchSource:0}: Error finding container d7aada8e716b2f600f480e4c43a21df6478112b5a89da4f16d8c5557787a2a91: Status 404 returned error can't find the container with id d7aada8e716b2f600f480e4c43a21df6478112b5a89da4f16d8c5557787a2a91
Nov 26 15:38:56 crc kubenswrapper[5037]: I1126 15:38:56.589711 5037 generic.go:334] "Generic (PLEG): container finished" podID="3a2efbb6-cf00-4fad-a36f-c82fb4bab849" containerID="5241525b40e8e8c302b9268c42baf286c25db5a24cfe1696b093a8da5919b13a" exitCode=0
Nov 26 15:38:56 crc kubenswrapper[5037]: I1126 15:38:56.589832 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-76brr" event={"ID":"3a2efbb6-cf00-4fad-a36f-c82fb4bab849","Type":"ContainerDied","Data":"5241525b40e8e8c302b9268c42baf286c25db5a24cfe1696b093a8da5919b13a"}
Nov 26 15:38:56 crc kubenswrapper[5037]: I1126 15:38:56.590120 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-76brr" event={"ID":"3a2efbb6-cf00-4fad-a36f-c82fb4bab849","Type":"ContainerStarted","Data":"d7aada8e716b2f600f480e4c43a21df6478112b5a89da4f16d8c5557787a2a91"}
Nov 26 15:38:58 crc kubenswrapper[5037]: I1126 15:38:58.607725 5037 generic.go:334] "Generic (PLEG): container finished" podID="3a2efbb6-cf00-4fad-a36f-c82fb4bab849" containerID="6cb229ec17873f7143512b613127b2c8d233534979910da7149419498717301e" exitCode=0
Nov 26 15:38:58 crc kubenswrapper[5037]: I1126 15:38:58.608019 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-76brr" event={"ID":"3a2efbb6-cf00-4fad-a36f-c82fb4bab849","Type":"ContainerDied","Data":"6cb229ec17873f7143512b613127b2c8d233534979910da7149419498717301e"}
Nov 26 15:39:00 crc kubenswrapper[5037]: I1126 15:39:00.628001 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-76brr" event={"ID":"3a2efbb6-cf00-4fad-a36f-c82fb4bab849","Type":"ContainerStarted","Data":"8431aa15926ff36b2c51d5666f951ec2bcff1a423125ba9cff0ba3d16ea30f0f"}
Nov 26 15:39:00 crc kubenswrapper[5037]: I1126 15:39:00.677532 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-76brr" podStartSLOduration=3.259382721 podStartE2EDuration="6.6774957s" podCreationTimestamp="2025-11-26 15:38:54 +0000 UTC" firstStartedPulling="2025-11-26 15:38:56.59203955 +0000 UTC m=+5003.388809744" lastFinishedPulling="2025-11-26 15:39:00.010152529 +0000 UTC m=+5006.806922723" observedRunningTime="2025-11-26 15:39:00.668078563 +0000 UTC m=+5007.464848757" watchObservedRunningTime="2025-11-26 15:39:00.6774957 +0000 UTC m=+5007.474265924"
Nov 26 15:39:03 crc kubenswrapper[5037]: I1126 15:39:03.917634 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67"
Nov 26 15:39:03 crc kubenswrapper[5037]: E1126 15:39:03.918489 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:39:05 crc kubenswrapper[5037]: I1126 15:39:05.342366 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:39:05 crc kubenswrapper[5037]: I1126 15:39:05.342544 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:39:05 crc kubenswrapper[5037]: I1126 15:39:05.410150 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:39:05 crc kubenswrapper[5037]: I1126 15:39:05.775796 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-76brr"
Nov 26 15:39:05 crc kubenswrapper[5037]: I1126 15:39:05.831195 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-76brr"]
Nov 26 15:39:07 crc kubenswrapper[5037]: I1126 15:39:07.715744 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-76brr" podUID="3a2efbb6-cf00-4fad-a36f-c82fb4bab849" containerName="registry-server" containerID="cri-o://8431aa15926ff36b2c51d5666f951ec2bcff1a423125ba9cff0ba3d16ea30f0f" gracePeriod=2
Need to start a new one" pod="openshift-marketplace/community-operators-76brr" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.249754 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xftpw\" (UniqueName: \"kubernetes.io/projected/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-kube-api-access-xftpw\") pod \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\" (UID: \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\") " Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.249924 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-utilities\") pod \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\" (UID: \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\") " Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.249963 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-catalog-content\") pod \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\" (UID: \"3a2efbb6-cf00-4fad-a36f-c82fb4bab849\") " Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.262470 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-utilities" (OuterVolumeSpecName: "utilities") pod "3a2efbb6-cf00-4fad-a36f-c82fb4bab849" (UID: "3a2efbb6-cf00-4fad-a36f-c82fb4bab849"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.267333 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-kube-api-access-xftpw" (OuterVolumeSpecName: "kube-api-access-xftpw") pod "3a2efbb6-cf00-4fad-a36f-c82fb4bab849" (UID: "3a2efbb6-cf00-4fad-a36f-c82fb4bab849"). InnerVolumeSpecName "kube-api-access-xftpw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.327219 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3a2efbb6-cf00-4fad-a36f-c82fb4bab849" (UID: "3a2efbb6-cf00-4fad-a36f-c82fb4bab849"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.352925 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.353747 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.353786 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xftpw\" (UniqueName: \"kubernetes.io/projected/3a2efbb6-cf00-4fad-a36f-c82fb4bab849-kube-api-access-xftpw\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.728805 5037 generic.go:334] "Generic (PLEG): container finished" podID="3a2efbb6-cf00-4fad-a36f-c82fb4bab849" containerID="8431aa15926ff36b2c51d5666f951ec2bcff1a423125ba9cff0ba3d16ea30f0f" exitCode=0 Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.728936 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-76brr" event={"ID":"3a2efbb6-cf00-4fad-a36f-c82fb4bab849","Type":"ContainerDied","Data":"8431aa15926ff36b2c51d5666f951ec2bcff1a423125ba9cff0ba3d16ea30f0f"} Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.728977 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-76brr" event={"ID":"3a2efbb6-cf00-4fad-a36f-c82fb4bab849","Type":"ContainerDied","Data":"d7aada8e716b2f600f480e4c43a21df6478112b5a89da4f16d8c5557787a2a91"} Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.729005 5037 scope.go:117] "RemoveContainer" containerID="8431aa15926ff36b2c51d5666f951ec2bcff1a423125ba9cff0ba3d16ea30f0f" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.729252 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-76brr" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.759216 5037 scope.go:117] "RemoveContainer" containerID="6cb229ec17873f7143512b613127b2c8d233534979910da7149419498717301e" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.793388 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-76brr"] Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.805331 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-76brr"] Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.824337 5037 scope.go:117] "RemoveContainer" containerID="5241525b40e8e8c302b9268c42baf286c25db5a24cfe1696b093a8da5919b13a" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.851486 5037 scope.go:117] "RemoveContainer" containerID="8431aa15926ff36b2c51d5666f951ec2bcff1a423125ba9cff0ba3d16ea30f0f" Nov 26 15:39:08 crc kubenswrapper[5037]: E1126 15:39:08.852276 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8431aa15926ff36b2c51d5666f951ec2bcff1a423125ba9cff0ba3d16ea30f0f\": container with ID starting with 8431aa15926ff36b2c51d5666f951ec2bcff1a423125ba9cff0ba3d16ea30f0f not found: ID does not exist" containerID="8431aa15926ff36b2c51d5666f951ec2bcff1a423125ba9cff0ba3d16ea30f0f" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.852346 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8431aa15926ff36b2c51d5666f951ec2bcff1a423125ba9cff0ba3d16ea30f0f"} err="failed to get container status \"8431aa15926ff36b2c51d5666f951ec2bcff1a423125ba9cff0ba3d16ea30f0f\": rpc error: code = NotFound desc = could not find container \"8431aa15926ff36b2c51d5666f951ec2bcff1a423125ba9cff0ba3d16ea30f0f\": container with ID starting with 8431aa15926ff36b2c51d5666f951ec2bcff1a423125ba9cff0ba3d16ea30f0f not found: ID does not exist" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.852376 5037 scope.go:117] "RemoveContainer" containerID="6cb229ec17873f7143512b613127b2c8d233534979910da7149419498717301e" Nov 26 15:39:08 crc kubenswrapper[5037]: E1126 15:39:08.852792 5037 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cb229ec17873f7143512b613127b2c8d233534979910da7149419498717301e\": container with ID starting with 6cb229ec17873f7143512b613127b2c8d233534979910da7149419498717301e not found: ID does not exist" containerID="6cb229ec17873f7143512b613127b2c8d233534979910da7149419498717301e" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.853045 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cb229ec17873f7143512b613127b2c8d233534979910da7149419498717301e"} err="failed to get container status \"6cb229ec17873f7143512b613127b2c8d233534979910da7149419498717301e\": rpc error: code = NotFound desc = could not find container \"6cb229ec17873f7143512b613127b2c8d233534979910da7149419498717301e\": container with ID starting with 6cb229ec17873f7143512b613127b2c8d233534979910da7149419498717301e not found: ID does not exist" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.853180 5037 scope.go:117] "RemoveContainer" containerID="5241525b40e8e8c302b9268c42baf286c25db5a24cfe1696b093a8da5919b13a" Nov 26 15:39:08 crc kubenswrapper[5037]: E1126 15:39:08.853666 5037 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"5241525b40e8e8c302b9268c42baf286c25db5a24cfe1696b093a8da5919b13a\": container with ID starting with 5241525b40e8e8c302b9268c42baf286c25db5a24cfe1696b093a8da5919b13a not found: ID does not exist" containerID="5241525b40e8e8c302b9268c42baf286c25db5a24cfe1696b093a8da5919b13a" Nov 26 15:39:08 crc kubenswrapper[5037]: I1126 15:39:08.853753 5037 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5241525b40e8e8c302b9268c42baf286c25db5a24cfe1696b093a8da5919b13a"} err="failed to get container status \"5241525b40e8e8c302b9268c42baf286c25db5a24cfe1696b093a8da5919b13a\": rpc error: code = NotFound desc = could not find container \"5241525b40e8e8c302b9268c42baf286c25db5a24cfe1696b093a8da5919b13a\": container with ID starting with 5241525b40e8e8c302b9268c42baf286c25db5a24cfe1696b093a8da5919b13a not found: ID does not exist" Nov 26 15:39:09 crc kubenswrapper[5037]: I1126 15:39:09.919226 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a2efbb6-cf00-4fad-a36f-c82fb4bab849" path="/var/lib/kubelet/pods/3a2efbb6-cf00-4fad-a36f-c82fb4bab849/volumes" Nov 26 15:39:10 crc kubenswrapper[5037]: I1126 15:39:10.752330 5037 generic.go:334] "Generic (PLEG): container finished" podID="c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb" containerID="b288b37e7c8d0186cff1f91bd08dd2d8eb96e434c641609c6e59d6605d155070" exitCode=0 Nov 26 15:39:10 crc kubenswrapper[5037]: I1126 15:39:10.752463 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hkdw9/must-gather-7cgw2" event={"ID":"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb","Type":"ContainerDied","Data":"b288b37e7c8d0186cff1f91bd08dd2d8eb96e434c641609c6e59d6605d155070"} Nov 26 15:39:10 crc kubenswrapper[5037]: I1126 15:39:10.753733 5037 scope.go:117] "RemoveContainer" containerID="b288b37e7c8d0186cff1f91bd08dd2d8eb96e434c641609c6e59d6605d155070" Nov 26 15:39:11 crc kubenswrapper[5037]: I1126 15:39:11.020834 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-hkdw9_must-gather-7cgw2_c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb/gather/0.log" Nov 26 15:39:16 crc kubenswrapper[5037]: I1126 15:39:16.908237 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67" Nov 26 15:39:16 crc kubenswrapper[5037]: E1126 15:39:16.911391 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:39:18 crc kubenswrapper[5037]: I1126 15:39:18.593006 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-hkdw9/must-gather-7cgw2"] Nov 26 15:39:18 crc kubenswrapper[5037]: I1126 15:39:18.593297 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-hkdw9/must-gather-7cgw2" podUID="c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb" containerName="copy" containerID="cri-o://3ded05fe92fd30e6936ae87cf15c3189b785d8810ebabaddd0719f8310a3538c" gracePeriod=2 Nov 26 15:39:18 crc kubenswrapper[5037]: I1126 15:39:18.601557 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-hkdw9/must-gather-7cgw2"] Nov 26 
Nov 26 15:39:18 crc kubenswrapper[5037]: I1126 15:39:18.827405 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-hkdw9_must-gather-7cgw2_c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb/copy/0.log"
Nov 26 15:39:18 crc kubenswrapper[5037]: I1126 15:39:18.828321 5037 generic.go:334] "Generic (PLEG): container finished" podID="c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb" containerID="3ded05fe92fd30e6936ae87cf15c3189b785d8810ebabaddd0719f8310a3538c" exitCode=143
Nov 26 15:39:19 crc kubenswrapper[5037]: I1126 15:39:19.035245 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-hkdw9_must-gather-7cgw2_c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb/copy/0.log"
Nov 26 15:39:19 crc kubenswrapper[5037]: I1126 15:39:19.035985 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-hkdw9/must-gather-7cgw2"
Nov 26 15:39:19 crc kubenswrapper[5037]: I1126 15:39:19.233833 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb-must-gather-output\") pod \"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb\" (UID: \"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb\") "
Nov 26 15:39:19 crc kubenswrapper[5037]: I1126 15:39:19.234209 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rkdt\" (UniqueName: \"kubernetes.io/projected/c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb-kube-api-access-8rkdt\") pod \"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb\" (UID: \"c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb\") "
Nov 26 15:39:19 crc kubenswrapper[5037]: I1126 15:39:19.241277 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb-kube-api-access-8rkdt" (OuterVolumeSpecName: "kube-api-access-8rkdt") pod "c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb" (UID: "c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb"). InnerVolumeSpecName "kube-api-access-8rkdt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 15:39:19 crc kubenswrapper[5037]: I1126 15:39:19.319449 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb" (UID: "c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 15:39:19 crc kubenswrapper[5037]: I1126 15:39:19.335716 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rkdt\" (UniqueName: \"kubernetes.io/projected/c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb-kube-api-access-8rkdt\") on node \"crc\" DevicePath \"\""
Nov 26 15:39:19 crc kubenswrapper[5037]: I1126 15:39:19.335762 5037 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb-must-gather-output\") on node \"crc\" DevicePath \"\""
Nov 26 15:39:19 crc kubenswrapper[5037]: I1126 15:39:19.837019 5037 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-hkdw9_must-gather-7cgw2_c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb/copy/0.log"
Nov 26 15:39:19 crc kubenswrapper[5037]: I1126 15:39:19.837416 5037 scope.go:117] "RemoveContainer" containerID="3ded05fe92fd30e6936ae87cf15c3189b785d8810ebabaddd0719f8310a3538c"
Nov 26 15:39:19 crc kubenswrapper[5037]: I1126 15:39:19.837482 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-hkdw9/must-gather-7cgw2"
Nov 26 15:39:19 crc kubenswrapper[5037]: I1126 15:39:19.859634 5037 scope.go:117] "RemoveContainer" containerID="b288b37e7c8d0186cff1f91bd08dd2d8eb96e434c641609c6e59d6605d155070"
Nov 26 15:39:19 crc kubenswrapper[5037]: I1126 15:39:19.916242 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb" path="/var/lib/kubelet/pods/c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb/volumes"
Nov 26 15:39:27 crc kubenswrapper[5037]: I1126 15:39:27.908527 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67"
Nov 26 15:39:27 crc kubenswrapper[5037]: E1126 15:39:27.909381 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.444373 5037 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-htv5g"]
Nov 26 15:39:28 crc kubenswrapper[5037]: E1126 15:39:28.444740 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a2efbb6-cf00-4fad-a36f-c82fb4bab849" containerName="extract-content"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.444762 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a2efbb6-cf00-4fad-a36f-c82fb4bab849" containerName="extract-content"
Nov 26 15:39:28 crc kubenswrapper[5037]: E1126 15:39:28.444779 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb" containerName="copy"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.444787 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb" containerName="copy"
Nov 26 15:39:28 crc kubenswrapper[5037]: E1126 15:39:28.444799 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb" containerName="gather"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.444808 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb" containerName="gather"
Nov 26 15:39:28 crc kubenswrapper[5037]: E1126 15:39:28.444832 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a2efbb6-cf00-4fad-a36f-c82fb4bab849" containerName="extract-utilities"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.444839 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a2efbb6-cf00-4fad-a36f-c82fb4bab849" containerName="extract-utilities"
Nov 26 15:39:28 crc kubenswrapper[5037]: E1126 15:39:28.444850 5037 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a2efbb6-cf00-4fad-a36f-c82fb4bab849" containerName="registry-server"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.444859 5037 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a2efbb6-cf00-4fad-a36f-c82fb4bab849" containerName="registry-server"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.444992 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb" containerName="gather"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.445008 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a2efbb6-cf00-4fad-a36f-c82fb4bab849" containerName="registry-server"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.445023 5037 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5b4cc4f-a493-42cd-9c97-9b3f1465c6cb" containerName="copy"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.445979 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.455392 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-htv5g"]
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.478729 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59757e7a-4346-4eac-834f-acfe43abf751-catalog-content\") pod \"certified-operators-htv5g\" (UID: \"59757e7a-4346-4eac-834f-acfe43abf751\") " pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.478793 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lv6r\" (UniqueName: \"kubernetes.io/projected/59757e7a-4346-4eac-834f-acfe43abf751-kube-api-access-9lv6r\") pod \"certified-operators-htv5g\" (UID: \"59757e7a-4346-4eac-834f-acfe43abf751\") " pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.479197 5037 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59757e7a-4346-4eac-834f-acfe43abf751-utilities\") pod \"certified-operators-htv5g\" (UID: \"59757e7a-4346-4eac-834f-acfe43abf751\") " pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.580433 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59757e7a-4346-4eac-834f-acfe43abf751-utilities\") pod \"certified-operators-htv5g\" (UID: \"59757e7a-4346-4eac-834f-acfe43abf751\") " pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.580501 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59757e7a-4346-4eac-834f-acfe43abf751-catalog-content\") pod \"certified-operators-htv5g\" (UID: \"59757e7a-4346-4eac-834f-acfe43abf751\") " pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.580528 5037 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lv6r\" (UniqueName: \"kubernetes.io/projected/59757e7a-4346-4eac-834f-acfe43abf751-kube-api-access-9lv6r\") pod \"certified-operators-htv5g\" (UID: \"59757e7a-4346-4eac-834f-acfe43abf751\") " pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.580988 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59757e7a-4346-4eac-834f-acfe43abf751-utilities\") pod \"certified-operators-htv5g\" (UID: \"59757e7a-4346-4eac-834f-acfe43abf751\") " pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.581315 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59757e7a-4346-4eac-834f-acfe43abf751-catalog-content\") pod \"certified-operators-htv5g\" (UID: \"59757e7a-4346-4eac-834f-acfe43abf751\") " pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.603938 5037 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lv6r\" (UniqueName: \"kubernetes.io/projected/59757e7a-4346-4eac-834f-acfe43abf751-kube-api-access-9lv6r\") pod \"certified-operators-htv5g\" (UID: \"59757e7a-4346-4eac-834f-acfe43abf751\") " pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:28 crc kubenswrapper[5037]: I1126 15:39:28.765523 5037 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:29 crc kubenswrapper[5037]: I1126 15:39:29.212114 5037 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-htv5g"]
Nov 26 15:39:29 crc kubenswrapper[5037]: I1126 15:39:29.934679 5037 generic.go:334] "Generic (PLEG): container finished" podID="59757e7a-4346-4eac-834f-acfe43abf751" containerID="343ff904b345a0a042b61aa1fa2da90977a84f33bed36cd48f8c8700b176b53b" exitCode=0
Nov 26 15:39:29 crc kubenswrapper[5037]: I1126 15:39:29.934741 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-htv5g" event={"ID":"59757e7a-4346-4eac-834f-acfe43abf751","Type":"ContainerDied","Data":"343ff904b345a0a042b61aa1fa2da90977a84f33bed36cd48f8c8700b176b53b"}
Nov 26 15:39:29 crc kubenswrapper[5037]: I1126 15:39:29.935807 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-htv5g" event={"ID":"59757e7a-4346-4eac-834f-acfe43abf751","Type":"ContainerStarted","Data":"03efe74cdd04e245e8712e64228e52db925e36d6897b4be60fd1228304b9bc0c"}
Nov 26 15:39:30 crc kubenswrapper[5037]: I1126 15:39:30.943593 5037 generic.go:334] "Generic (PLEG): container finished" podID="59757e7a-4346-4eac-834f-acfe43abf751" containerID="8663f9a15f764ef7c325aac260c388b21b0a5ff2bdf3737eccdf2494822af259" exitCode=0
Nov 26 15:39:30 crc kubenswrapper[5037]: I1126 15:39:30.943685 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-htv5g" event={"ID":"59757e7a-4346-4eac-834f-acfe43abf751","Type":"ContainerDied","Data":"8663f9a15f764ef7c325aac260c388b21b0a5ff2bdf3737eccdf2494822af259"}
Nov 26 15:39:32 crc kubenswrapper[5037]: I1126 15:39:32.964914 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-htv5g" event={"ID":"59757e7a-4346-4eac-834f-acfe43abf751","Type":"ContainerStarted","Data":"435d3ffcd94353d08fe60af98a24cc1a56456a4ab3ae29e284d2977322b63156"}
Nov 26 15:39:33 crc kubenswrapper[5037]: I1126 15:39:33.005730 5037 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-htv5g" podStartSLOduration=2.768568045 podStartE2EDuration="5.005702688s" podCreationTimestamp="2025-11-26 15:39:28 +0000 UTC" firstStartedPulling="2025-11-26 15:39:29.936662736 +0000 UTC m=+5036.733432930" lastFinishedPulling="2025-11-26 15:39:32.173797349 +0000 UTC m=+5038.970567573" observedRunningTime="2025-11-26 15:39:32.996670269 +0000 UTC m=+5039.793440483" watchObservedRunningTime="2025-11-26 15:39:33.005702688 +0000 UTC m=+5039.802472902"
Nov 26 15:39:38 crc kubenswrapper[5037]: I1126 15:39:38.766490 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:38 crc kubenswrapper[5037]: I1126 15:39:38.766937 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:38 crc kubenswrapper[5037]: I1126 15:39:38.816647 5037 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:39 crc kubenswrapper[5037]: I1126 15:39:39.063188 5037 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-htv5g"
Nov 26 15:39:39 crc kubenswrapper[5037]: I1126 15:39:39.109234 5037 kubelet.go:2437] "SyncLoop DELETE"
source="api" pods=["openshift-marketplace/certified-operators-htv5g"] Nov 26 15:39:40 crc kubenswrapper[5037]: I1126 15:39:40.908851 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67" Nov 26 15:39:40 crc kubenswrapper[5037]: E1126 15:39:40.909615 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:39:41 crc kubenswrapper[5037]: I1126 15:39:41.023093 5037 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-htv5g" podUID="59757e7a-4346-4eac-834f-acfe43abf751" containerName="registry-server" containerID="cri-o://435d3ffcd94353d08fe60af98a24cc1a56456a4ab3ae29e284d2977322b63156" gracePeriod=2 Nov 26 15:39:42 crc kubenswrapper[5037]: I1126 15:39:42.032737 5037 generic.go:334] "Generic (PLEG): container finished" podID="59757e7a-4346-4eac-834f-acfe43abf751" containerID="435d3ffcd94353d08fe60af98a24cc1a56456a4ab3ae29e284d2977322b63156" exitCode=0 Nov 26 15:39:42 crc kubenswrapper[5037]: I1126 15:39:42.032795 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-htv5g" event={"ID":"59757e7a-4346-4eac-834f-acfe43abf751","Type":"ContainerDied","Data":"435d3ffcd94353d08fe60af98a24cc1a56456a4ab3ae29e284d2977322b63156"} Nov 26 15:39:42 crc kubenswrapper[5037]: I1126 15:39:42.451880 5037 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-htv5g" Nov 26 15:39:42 crc kubenswrapper[5037]: I1126 15:39:42.588886 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59757e7a-4346-4eac-834f-acfe43abf751-utilities\") pod \"59757e7a-4346-4eac-834f-acfe43abf751\" (UID: \"59757e7a-4346-4eac-834f-acfe43abf751\") " Nov 26 15:39:42 crc kubenswrapper[5037]: I1126 15:39:42.589021 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9lv6r\" (UniqueName: \"kubernetes.io/projected/59757e7a-4346-4eac-834f-acfe43abf751-kube-api-access-9lv6r\") pod \"59757e7a-4346-4eac-834f-acfe43abf751\" (UID: \"59757e7a-4346-4eac-834f-acfe43abf751\") " Nov 26 15:39:42 crc kubenswrapper[5037]: I1126 15:39:42.589048 5037 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59757e7a-4346-4eac-834f-acfe43abf751-catalog-content\") pod \"59757e7a-4346-4eac-834f-acfe43abf751\" (UID: \"59757e7a-4346-4eac-834f-acfe43abf751\") " Nov 26 15:39:42 crc kubenswrapper[5037]: I1126 15:39:42.590402 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59757e7a-4346-4eac-834f-acfe43abf751-utilities" (OuterVolumeSpecName: "utilities") pod "59757e7a-4346-4eac-834f-acfe43abf751" (UID: "59757e7a-4346-4eac-834f-acfe43abf751"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:39:42 crc kubenswrapper[5037]: I1126 15:39:42.597956 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59757e7a-4346-4eac-834f-acfe43abf751-kube-api-access-9lv6r" (OuterVolumeSpecName: "kube-api-access-9lv6r") pod "59757e7a-4346-4eac-834f-acfe43abf751" (UID: "59757e7a-4346-4eac-834f-acfe43abf751"). InnerVolumeSpecName "kube-api-access-9lv6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:39:42 crc kubenswrapper[5037]: I1126 15:39:42.659742 5037 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59757e7a-4346-4eac-834f-acfe43abf751-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "59757e7a-4346-4eac-834f-acfe43abf751" (UID: "59757e7a-4346-4eac-834f-acfe43abf751"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:39:42 crc kubenswrapper[5037]: I1126 15:39:42.691802 5037 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59757e7a-4346-4eac-834f-acfe43abf751-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:42 crc kubenswrapper[5037]: I1126 15:39:42.691903 5037 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9lv6r\" (UniqueName: \"kubernetes.io/projected/59757e7a-4346-4eac-834f-acfe43abf751-kube-api-access-9lv6r\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:42 crc kubenswrapper[5037]: I1126 15:39:42.691924 5037 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59757e7a-4346-4eac-834f-acfe43abf751-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:43 crc kubenswrapper[5037]: I1126 15:39:43.043086 5037 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-htv5g" event={"ID":"59757e7a-4346-4eac-834f-acfe43abf751","Type":"ContainerDied","Data":"03efe74cdd04e245e8712e64228e52db925e36d6897b4be60fd1228304b9bc0c"} Nov 26 15:39:43 crc kubenswrapper[5037]: I1126 15:39:43.043163 5037 scope.go:117] "RemoveContainer" containerID="435d3ffcd94353d08fe60af98a24cc1a56456a4ab3ae29e284d2977322b63156" Nov 26 15:39:43 crc kubenswrapper[5037]: I1126 15:39:43.043426 5037 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-htv5g" Nov 26 15:39:43 crc kubenswrapper[5037]: I1126 15:39:43.071622 5037 scope.go:117] "RemoveContainer" containerID="8663f9a15f764ef7c325aac260c388b21b0a5ff2bdf3737eccdf2494822af259" Nov 26 15:39:43 crc kubenswrapper[5037]: I1126 15:39:43.103017 5037 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-htv5g"] Nov 26 15:39:43 crc kubenswrapper[5037]: I1126 15:39:43.111106 5037 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-htv5g"] Nov 26 15:39:43 crc kubenswrapper[5037]: I1126 15:39:43.120712 5037 scope.go:117] "RemoveContainer" containerID="343ff904b345a0a042b61aa1fa2da90977a84f33bed36cd48f8c8700b176b53b" Nov 26 15:39:43 crc kubenswrapper[5037]: I1126 15:39:43.917277 5037 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59757e7a-4346-4eac-834f-acfe43abf751" path="/var/lib/kubelet/pods/59757e7a-4346-4eac-834f-acfe43abf751/volumes" Nov 26 15:39:51 crc kubenswrapper[5037]: I1126 15:39:51.909616 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67" Nov 26 15:39:51 crc kubenswrapper[5037]: E1126 15:39:51.910827 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:40:04 crc kubenswrapper[5037]: I1126 15:40:04.908947 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67" Nov 26 15:40:04 crc kubenswrapper[5037]: E1126 15:40:04.909989 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:40:15 crc kubenswrapper[5037]: I1126 15:40:15.908253 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67" Nov 26 15:40:15 crc kubenswrapper[5037]: E1126 15:40:15.908993 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb" Nov 26 15:40:30 crc kubenswrapper[5037]: I1126 15:40:30.908896 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67" Nov 26 15:40:30 crc kubenswrapper[5037]: E1126 15:40:30.909636 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 26 15:40:45 crc kubenswrapper[5037]: I1126 15:40:45.908350 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67"
Nov 26 15:40:45 crc kubenswrapper[5037]: E1126 15:40:45.909269 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:40:58 crc kubenswrapper[5037]: I1126 15:40:58.908204 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67"
Nov 26 15:40:58 crc kubenswrapper[5037]: E1126 15:40:58.909213 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:41:11 crc kubenswrapper[5037]: I1126 15:41:11.907999 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67"
Nov 26 15:41:11 crc kubenswrapper[5037]: E1126 15:41:11.908808 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
Nov 26 15:41:26 crc kubenswrapper[5037]: I1126 15:41:26.909110 5037 scope.go:117] "RemoveContainer" containerID="2cf07bc1aad06044aecd8035db593b51d0d3f1bb53787812167d734b14f4cc67"
Nov 26 15:41:26 crc kubenswrapper[5037]: E1126 15:41:26.910215 5037 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8jk2d_openshift-machine-config-operator(8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-8jk2d" podUID="8bbdf8d8-f2ed-4b76-929a-a1a6c07e85fb"
var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515111617661024452 0ustar coreroot
var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015111617662017370 5ustar coreroot
var/home/core/zuul-output/artifacts/0000755000175000017500000000000015111605201016475 5ustar corecore
var/home/core/zuul-output/docs/0000755000175000017500000000000015111605201015455 5ustar corecore